code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A : int = 16 __A : Union[str, Any] = 32 def UpperCAmelCase ( lowerCamelCase_ :Accelerator , lowerCamelCase_ :int = 16 ): '''simple docstring''' snake_case_ : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""" ) snake_case_ : List[Any] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(lowerCamelCase_ :Any ): # max_length=None => use the model max length (it's actually the default) snake_case_ : Dict = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): snake_case_ : str = datasets.map( lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ : Dict = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCamelCase_ :Optional[int] ): # On TPU it's best to pad everything to the same length or training will be very slow. snake_case_ : str = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": snake_case_ : str = 16 elif accelerator.mixed_precision != "no": snake_case_ : List[Any] = 8 else: snake_case_ : Tuple = None return tokenizer.pad( lowerCamelCase_ , padding="""longest""" , max_length=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_tensors="""pt""" , ) # Instantiate dataloaders. snake_case_ : Any = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) snake_case_ : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A : int = mocked_dataloaders # noqa: F811 def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[Any] ): '''simple docstring''' # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCamelCase_ ) == "1": snake_case_ : List[Any] = 2 # Initialize accelerator snake_case_ : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ : Optional[Any] = config["""lr"""] snake_case_ : List[str] = int(config["""num_epochs"""] ) snake_case_ : Optional[Any] = int(config["""seed"""] ) snake_case_ : int = int(config["""batch_size"""] ) snake_case_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowerCamelCase_ ) def inner_training_loop(lowerCamelCase_ :Dict ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowerCamelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). snake_case_ : List[Any] = model.to(accelerator.device ) # Instantiate optimizer snake_case_ : int = AdamW(params=model.parameters() , lr=lowerCamelCase_ ) snake_case_ : Union[str, Any] = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ ) # Instantiate scheduler snake_case_ : int = get_linear_schedule_with_warmup( optimizer=lowerCamelCase_ , num_warmup_steps=1_00 , num_training_steps=(len(lowerCamelCase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case_ : List[Any] = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Now we train the model for epoch in range(lowerCamelCase_ ): model.train() for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) snake_case_ : List[Any] = model(**lowerCamelCase_ ) snake_case_ : Dict = outputs.loss accelerator.backward(lowerCamelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case_ : str = model(**lowerCamelCase_ ) snake_case_ : int = outputs.logits.argmax(dim=-1 ) snake_case_ : Dict = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=lowerCamelCase_ , references=lowerCamelCase_ , ) snake_case_ : Dict = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , lowerCamelCase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Tuple = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=lowerCamelCase_ , default=lowerCamelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) snake_case_ : Optional[Any] = parser.parse_args() snake_case_ : int = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
356
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str=True , lowerCamelCase_ :str="pt" ): '''simple docstring''' snake_case_ : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} snake_case_ : Union[str, Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Any=None , ): '''simple docstring''' snake_case_ : Dict = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __UpperCamelCase ( lowercase__ ): def __init__( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any="train" ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :int=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Optional[int]="" ,): super().__init__() snake_case_ : List[str] = Path(_UpperCamelCase ).joinpath(type_path + """.source""" ) snake_case_ : int = Path(_UpperCamelCase ).joinpath(type_path + """.target""" ) snake_case_ : Optional[int] = self.get_char_lens(self.src_file ) snake_case_ : List[str] = max_source_length snake_case_ : str = max_target_length assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}''' snake_case_ : str = tokenizer snake_case_ : str = prefix if n_obs is not None: snake_case_ : int = self.src_lens[:n_obs] snake_case_ : Tuple = src_lang snake_case_ : str = tgt_lang def __len__( self :Any ): return len(self.src_lens ) def __getitem__( self :List[str] ,_UpperCamelCase :Union[str, Any] ): snake_case_ : Optional[int] = index + 1 # linecache starts at 1 snake_case_ : Dict = self.prefix + linecache.getline(str(self.src_file ) ,_UpperCamelCase ).rstrip("""\n""" ) snake_case_ : List[Any] = linecache.getline(str(self.tgt_file ) ,_UpperCamelCase ).rstrip("""\n""" ) assert source_line, F'''empty source line for index {index}''' assert tgt_line, F'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_UpperCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right snake_case_ : int = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer ) snake_case_ : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer snake_case_ : Optional[Any] = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_source_length ,"""right""" ) snake_case_ : Tuple = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_target_length ,"""right""" ) snake_case_ : int = source_inputs["""input_ids"""].squeeze() snake_case_ : str = target_inputs["""input_ids"""].squeeze() snake_case_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( _UpperCamelCase :str ): return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()] def a__ ( self :Optional[int] ,_UpperCamelCase :List[str] ): snake_case_ : Optional[Any] = torch.stack([x["""input_ids"""] for x in batch] ) snake_case_ : List[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) snake_case_ : Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] ) snake_case_ : Optional[Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Tuple = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Optional[int] = trim_batch(_UpperCamelCase ,_UpperCamelCase ) snake_case_ , snake_case_ : Dict = trim_batch(_UpperCamelCase ,_UpperCamelCase ,attention_mask=_UpperCamelCase ) snake_case_ : Optional[int] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __A : List[Any] = getLogger(__name__) def UpperCAmelCase ( lowerCamelCase_ :List[List] ): '''simple docstring''' return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : int = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int]=4 , **lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] ): '''simple docstring''' with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Optional[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) snake_case_ : List[str] = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def UpperCAmelCase ( lowerCamelCase_ :Callable , lowerCamelCase_ :Iterable ): '''simple docstring''' return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Dict ): '''simple docstring''' def remove_articles(lowerCamelCase_ :str ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ :Optional[Any] ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ :Tuple ): snake_case_ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ :Optional[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' snake_case_ : List[Any] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : Optional[int] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : List[Any] = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) snake_case_ : Optional[Any] = sum(common.values() ) if num_same == 0: return 0 snake_case_ : Optional[Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Union[str, Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] ): '''simple docstring''' assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) snake_case_ : Optional[int] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' return model_prefix.startswith("""rag""" ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead snake_case_ : Optional[int] = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue snake_case_ : str = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
8
0
'''simple docstring''' from sklearn.metrics import fa_score import datasets __A : Tuple = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' __A : Tuple = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' __A : str = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def a__ ( self :Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) ,reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] ,) def a__ ( self :int ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :Dict=None ,_UpperCamelCase :Union[str, Any]=1 ,_UpperCamelCase :Any="binary" ,_UpperCamelCase :Dict=None ): snake_case_ : int = fa_score( _UpperCamelCase ,_UpperCamelCase ,labels=_UpperCamelCase ,pos_label=_UpperCamelCase ,average=_UpperCamelCase ,sample_weight=_UpperCamelCase ) return {"f1": float(_UpperCamelCase ) if score.size == 1 else score}
357
'''simple docstring''' import functools def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[str] = len(lowerCamelCase_ ) snake_case_ : Dict = len(lowerCamelCase_ ) @functools.cache def min_distance(lowerCamelCase_ :int , lowerCamelCase_ :int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa snake_case_ : Union[str, Any] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowerCamelCase_ ) , 1 + min_distance(lowerCamelCase_ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
8
0
'''simple docstring''' import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __A : Optional[int] = 16 __A : Optional[Any] = 32 def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' return int(x / 2**20 ) class __UpperCamelCase : def __enter__( self :Optional[int] ): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero snake_case_ : Optional[Any] = torch.cuda.memory_allocated() return self def __exit__( self :List[Any] ,*_UpperCamelCase :Union[str, Any] ): gc.collect() torch.cuda.empty_cache() snake_case_ : int = torch.cuda.memory_allocated() snake_case_ : Optional[int] = torch.cuda.max_memory_allocated() snake_case_ : Dict = bamb(self.end - self.begin ) snake_case_ : List[str] = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def UpperCAmelCase ( lowerCamelCase_ :Accelerator , lowerCamelCase_ :int = 16 , lowerCamelCase_ :str = "bert-base-cased" , lowerCamelCase_ :int = 3_20 , lowerCamelCase_ :int = 1_60 , ): '''simple docstring''' snake_case_ : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ ) snake_case_ : str = load_dataset( """glue""" , """mrpc""" , split={"""train""": F'''train[:{n_train}]''', """validation""": F'''validation[:{n_val}]'''} ) def tokenize_function(lowerCamelCase_ :Any ): # max_length=None => use the model max length (it's actually the default) snake_case_ : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case_ : Optional[Any] = datasets.map( lowerCamelCase_ , batched=lowerCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ : Union[str, Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCamelCase_ :List[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase_ , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" ) return tokenizer.pad(lowerCamelCase_ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. snake_case_ : int = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) snake_case_ : str = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase_ , collate_fn=lowerCamelCase_ , batch_size=lowerCamelCase_ ) return train_dataloader, eval_dataloader def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Any ): '''simple docstring''' snake_case_ : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ : Any = config["""lr"""] snake_case_ : List[Any] = int(config["""num_epochs"""] ) snake_case_ : str = int(config["""seed"""] ) snake_case_ : Union[str, Any] = int(config["""batch_size"""] ) snake_case_ : Optional[Any] = args.model_name_or_path set_seed(lowerCamelCase_ ) snake_case_ : Union[str, Any] = get_dataloaders(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ ) # Instantiate optimizer snake_case_ : List[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case_ : Any = optimizer_cls(params=model.parameters() , lr=lowerCamelCase_ ) if accelerator.state.deepspeed_plugin is not None: snake_case_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: snake_case_ : Optional[int] = 1 snake_case_ : Any = (len(lowerCamelCase_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case_ : List[Any] = get_linear_schedule_with_warmup( optimizer=lowerCamelCase_ , num_warmup_steps=0 , num_training_steps=lowerCamelCase_ , ) else: snake_case_ : List[str] = DummyScheduler(lowerCamelCase_ , total_num_steps=lowerCamelCase_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case_ : Any = accelerator.prepare( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # We need to keep track of how many total steps we have iterated over snake_case_ : List[Any] = 0 # We also need to keep track of the stating epoch so files are named properly snake_case_ : List[str] = 0 # Now we train the model snake_case_ : Tuple = {} for epoch in range(lowerCamelCase_ , lowerCamelCase_ ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(lowerCamelCase_ ): snake_case_ : Any = model(**lowerCamelCase_ ) snake_case_ : List[str] = outputs.loss snake_case_ : Union[str, Any] = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("""Memory before entering the train : {}""".format(bamb(tracemalloc.begin ) ) ) accelerator.print("""Memory consumed at the end of the train (end-begin): {}""".format(tracemalloc.used ) ) accelerator.print("""Peak Memory consumed during the train (max-begin): {}""".format(tracemalloc.peaked ) ) accelerator.print( """Total Peak Memory consumed during the train (max): {}""".format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) snake_case_ : Optional[Any] = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F'''epoch-{epoch}'''] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """peak_memory_utilization.json""" ) , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=lowerCamelCase_ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase_ , ) parser.add_argument( """--output_dir""" , type=lowerCamelCase_ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--peak_memory_upper_bound""" , type=lowerCamelCase_ , default=lowerCamelCase_ , help="""The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.""" , ) parser.add_argument( """--n_train""" , type=lowerCamelCase_ , default=3_20 , help="""Number of training examples to use.""" , ) parser.add_argument( """--n_val""" , type=lowerCamelCase_ , default=1_60 , help="""Number of validation examples to use.""" , ) parser.add_argument( """--num_epochs""" , type=lowerCamelCase_ , default=1 , help="""Number of train epochs.""" , ) snake_case_ : Any = parser.parse_args() snake_case_ : Optional[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
358
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Any = tmp_path / """file.csv""" snake_case_ : Any = textwrap.dedent( """\ header1,header2 1,2 10,20 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[int] = tmp_path / """malformed_file.csv""" snake_case_ : int = textwrap.dedent( """\ header1,header2 1,2 10,20, """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : str = tmp_path / """csv_with_image.csv""" snake_case_ : int = textwrap.dedent( F'''\ image {image_file} ''' ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' snake_case_ : int = tmp_path / """csv_with_label.csv""" snake_case_ : Tuple = textwrap.dedent( """\ label good bad good """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = tmp_path / """csv_with_int_list.csv""" snake_case_ : str = textwrap.dedent( """\ int_list 1 2 3 4 5 6 7 8 9 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Tuple ): '''simple docstring''' snake_case_ : int = Csv() snake_case_ : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(lowerCamelCase_ , match="""Error tokenizing data""" ): for _ in generator: pass assert any( record.levelname == """ERROR""" and """Failed to read file""" in record.message and os.path.basename(lowerCamelCase_ ) in record.message for record in caplog.records ) @require_pil def UpperCAmelCase ( lowerCamelCase_ :Tuple ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : Tuple = f.read().splitlines()[1] snake_case_ : str = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) ) snake_case_ : Tuple = csv._generate_tables([[csv_file_with_image]] ) snake_case_ : Optional[Any] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""image""" ).type == Image()() snake_case_ : List[str] = pa_table.to_pydict()["""image"""] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : List[Any] = f.read().splitlines()[1:] snake_case_ : Union[str, Any] = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] ) snake_case_ : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )() snake_case_ : Union[str, Any] = pa_table.to_pydict()["""label"""] assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(lowerCamelCase_ ) for label in labels] def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : str = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda lowerCamelCase_ : [int(lowerCamelCase_ ) for i in x.split()]} ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] ) snake_case_ : Tuple = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type ) snake_case_ : Dict = pa_table.to_pydict()["""int_list"""] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
8
0
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __A : Optional[Any] = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): lowercase : Optional[int] = ['input_ids', 'attention_mask'] def __init__( self :List[Any] ,_UpperCamelCase :Tuple="</s>" ,_UpperCamelCase :str="<unk>" ,_UpperCamelCase :str="<pad>" ,_UpperCamelCase :List[str]=1_2_5 ,_UpperCamelCase :List[str]=None ,**_UpperCamelCase :str ,): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: snake_case_ : List[str] = [F'''<extra_id_{i}>''' for i in range(_UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens snake_case_ : Optional[int] = len(set(filter(lambda _UpperCamelCase : bool("""extra_id""" in str(_UpperCamelCase ) ) ,_UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the""" """ extra_ids tokens""" ) snake_case_ : List[str] = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else pad_token snake_case_ : Dict = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else eos_token snake_case_ : Optional[Any] = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else unk_token super().__init__( eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,extra_ids=_UpperCamelCase ,additional_special_tokens=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Any = extra_ids snake_case_ : str = 2**8 # utf is 8 bits # define special tokens dict snake_case_ : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } snake_case_ : List[Any] = len(self.special_tokens_encoder ) snake_case_ : List[Any] = len(_UpperCamelCase ) for i, token in enumerate(_UpperCamelCase ): snake_case_ : Optional[int] = self.vocab_size + i - n snake_case_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def a__ ( self :Union[str, Any] ): return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ,_UpperCamelCase :bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase ,token_ids_a=_UpperCamelCase ,already_has_special_tokens=_UpperCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(_UpperCamelCase )) + [1] return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def a__ ( self :int ,_UpperCamelCase :List[int] ): if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated''' """ eos tokens being added.""" ) return token_ids else: return token_ids + [self.eos_token_id] def a__ ( self :Optional[Any] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : Union[str, Any] = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def a__ ( self :Optional[int] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : List[str] = self._add_eos_if_not_present(_UpperCamelCase ) if token_ids_a is None: return token_ids_a else: snake_case_ : Union[str, Any] = self._add_eos_if_not_present(_UpperCamelCase ) return token_ids_a + token_ids_a def a__ ( self :Optional[Any] ,_UpperCamelCase :str ): snake_case_ : List[str] = [chr(_UpperCamelCase ) for i in text.encode("""utf-8""" )] return tokens def a__ ( self :List[str] ,_UpperCamelCase :str ): if token in self.special_tokens_encoder: snake_case_ : List[str] = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: snake_case_ : Optional[Any] = self.added_tokens_encoder[token] elif len(_UpperCamelCase ) != 1: snake_case_ : Any = self.unk_token_id else: snake_case_ : int = ord(_UpperCamelCase ) + self._num_special_tokens return token_id def a__ ( self :List[str] ,_UpperCamelCase :List[Any] ): if index in self.special_tokens_decoder: snake_case_ : str = self.special_tokens_decoder[index] else: snake_case_ : Tuple = chr(index - self._num_special_tokens ) return token def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ): snake_case_ : Optional[int] = B"""""" for token in tokens: if token in self.special_tokens_decoder: snake_case_ : Optional[Any] = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.added_tokens_decoder: snake_case_ : Dict = self.special_tokens_decoder[token].encode("""utf-8""" ) elif token in self.special_tokens_encoder: snake_case_ : List[Any] = token.encode("""utf-8""" ) elif token in self.added_tokens_encoder: snake_case_ : Optional[int] = token.encode("""utf-8""" ) else: snake_case_ : List[Any] = bytes([ord(_UpperCamelCase )] ) bstring += tok_string snake_case_ : Union[str, Any] = bstring.decode("""utf-8""" ,errors="""ignore""" ) return string def a__ ( self :Dict ,_UpperCamelCase :str ,_UpperCamelCase :Optional[str] = None ): return ()
359
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=None ): '''simple docstring''' # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match''' snake_case_ : Optional[Any] = nn.Parameter(lowerCamelCase_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match''' snake_case_ : List[str] = nn.Parameter(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : Optional[Any] = np.asarray(weights[0] ) snake_case_ : int = np.asarray(weights[1] ) snake_case_ : Any = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : List[Any] = np.asarray(weights[0] ) snake_case_ : Optional[int] = np.asarray(weights[1] ) snake_case_ : Union[str, Any] = np.asarray(weights[2] ) snake_case_ : int = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' # layernorm 1 snake_case_ : str = weights[0][0][0] snake_case_ : int = np.asarray(layer_norm_a[0] ) snake_case_ : Optional[Any] = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # lsh weights + output snake_case_ : Tuple = weights[0][1] if len(lowerCamelCase_ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) else: set_layer_weights_in_torch_local(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) # intermediate weighs snake_case_ : str = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase_ ) == 4: snake_case_ : List[Any] = intermediate_weights[2] # layernorm 2 snake_case_ : Tuple = np.asarray(intermediate_weights[0][0] ) snake_case_ : Optional[Any] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # intermediate dense snake_case_ : Any = np.asarray(intermediate_weights[1][0] ) snake_case_ : List[Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) # intermediate out snake_case_ : List[Any] = np.asarray(intermediate_weights[4][0] ) snake_case_ : Union[str, Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :Any ): '''simple docstring''' # reformer model snake_case_ : Dict = torch_model.reformer # word embeds snake_case_ : List[Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase_ ) , ) if isinstance(weights[3] , lowerCamelCase_ ): snake_case_ : Tuple = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): snake_case_ : Dict = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'''{position_embeddings[emb_idx]} emb does not match''' snake_case_ : Optional[Any] = nn.Parameter(torch.tensor(lowerCamelCase_ ) ) snake_case_ : List[Any] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): snake_case_ : str = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # output layer norm snake_case_ : Optional[Any] = np.asarray(weights[7][0] ) snake_case_ : List[Any] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # output embeddings snake_case_ : Optional[int] = np.asarray(weights[9][0] ) snake_case_ : Any = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # Initialise PyTorch model snake_case_ : List[str] = ReformerConfig.from_json_file(lowerCamelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case_ : str = ReformerModelWithLMHead(lowerCamelCase_ ) with open(lowerCamelCase_ , """rb""" ) as f: snake_case_ : List[Any] = pickle.load(lowerCamelCase_ )["""weights"""] set_model_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , config.hidden_size ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , lowerCamelCase_ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __A : List[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
8
0
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): @slow def a__ ( self :Optional[Any] ): snake_case_ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" ,return_dict=_UpperCamelCase ).to(_UpperCamelCase ) snake_case_ : List[str] = AutoTokenizer.from_pretrained("""google/mt5-small""" ) snake_case_ : Any = tokenizer("""Hello there""" ,return_tensors="""pt""" ).input_ids snake_case_ : Dict = tokenizer("""Hi I am""" ,return_tensors="""pt""" ).input_ids snake_case_ : Optional[Any] = model(input_ids.to(_UpperCamelCase ) ,labels=labels.to(_UpperCamelCase ) ).loss snake_case_ : Any = -(labels.shape[-1] * loss.item()) snake_case_ : Dict = -84.91_27 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
360
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A : List[Any] = logging.get_logger(__name__) __A : str = { 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class __UpperCamelCase ( lowercase__ ): lowercase : List[Any] = 'canine' def __init__( self :Optional[int] ,_UpperCamelCase :Dict=7_6_8 ,_UpperCamelCase :Union[str, Any]=1_2 ,_UpperCamelCase :int=1_2 ,_UpperCamelCase :int=3_0_7_2 ,_UpperCamelCase :int="gelu" ,_UpperCamelCase :Any=0.1 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Any=1_6_3_8_4 ,_UpperCamelCase :Tuple=1_6 ,_UpperCamelCase :List[str]=0.02 ,_UpperCamelCase :Any=1E-1_2 ,_UpperCamelCase :Tuple=0 ,_UpperCamelCase :List[str]=0xE_0_0_0 ,_UpperCamelCase :Optional[Any]=0xE_0_0_1 ,_UpperCamelCase :str=4 ,_UpperCamelCase :Optional[int]=4 ,_UpperCamelCase :str=8 ,_UpperCamelCase :int=1_6_3_8_4 ,_UpperCamelCase :int=1_2_8 ,**_UpperCamelCase :str ,): super().__init__(pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : List[str] = max_position_embeddings snake_case_ : Union[str, Any] = hidden_size snake_case_ : Dict = num_hidden_layers snake_case_ : Optional[int] = num_attention_heads snake_case_ : Tuple = intermediate_size snake_case_ : str = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : Optional[Any] = initializer_range snake_case_ : Optional[int] = type_vocab_size snake_case_ : List[str] = layer_norm_eps # Character config: snake_case_ : Any = downsampling_rate snake_case_ : List[str] = upsampling_kernel_size snake_case_ : int = num_hash_functions snake_case_ : Tuple = num_hash_buckets snake_case_ : Tuple = local_transformer_stride
8
0
'''simple docstring''' import os import unittest from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer from transformers.testing_utils import require_jieba, tooslow from ...test_tokenization_common import TokenizerTesterMixin @require_jieba class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Any = CpmAntTokenizer lowercase : Tuple = False def a__ ( self :Optional[int] ): super().setUp() snake_case_ : List[Any] = [ """<d>""", """</d>""", """<s>""", """</s>""", """</_>""", """<unk>""", """<pad>""", """</n>""", """我""", """是""", """C""", """P""", """M""", """A""", """n""", """t""", ] snake_case_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) @tooslow def a__ ( self :str ): snake_case_ : Dict = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" ) snake_case_ : Optional[int] = """今天天气真好!""" snake_case_ : Optional[int] = ["""今天""", """天气""", """真""", """好""", """!"""] snake_case_ : Optional[Any] = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Optional[Any] = """今天天气真好!""" snake_case_ : Any = [tokenizer.bos_token] + tokens snake_case_ : Dict = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) ,_UpperCamelCase ) snake_case_ : Optional[Any] = tokenizer.decode(_UpperCamelCase ) self.assertEqual(_UpperCamelCase ,_UpperCamelCase )
361
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __A : Tuple = logging.get_logger(__name__) __A : List[Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : str = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } __A : Optional[Any] = { 'facebook/blenderbot_small-90M': 512, } class __UpperCamelCase ( lowercase__ ): lowercase : str = VOCAB_FILES_NAMES lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict = BlenderbotSmallTokenizer def __init__( self :str ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Tuple="<|endoftext|>" ,_UpperCamelCase :int="<|endoftext|>" ,_UpperCamelCase :Dict="<|endoftext|>" ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :List[Any]=True ,**_UpperCamelCase :Any ,): super().__init__( ByteLevelBPETokenizer( vocab=_UpperCamelCase ,merges=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,trim_offsets=_UpperCamelCase ,) ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Any = add_prefix_space def a__ ( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any]=None ): snake_case_ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : int = [self.sep_token_id] snake_case_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
8
0
'''simple docstring''' import functools def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[str] = len(lowerCamelCase_ ) snake_case_ : Dict = len(lowerCamelCase_ ) @functools.cache def min_distance(lowerCamelCase_ :int , lowerCamelCase_ :int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa snake_case_ : Union[str, Any] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowerCamelCase_ ) , 1 + min_distance(lowerCamelCase_ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
362
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :list ): '''simple docstring''' if len(lowerCamelCase_ ) <= 1: return lst snake_case_ : Union[str, Any] = 1 while i < len(lowerCamelCase_ ): if lst[i - 1] <= lst[i]: i += 1 else: snake_case_ , snake_case_ : Union[str, Any] = lst[i], lst[i - 1] i -= 1 if i == 0: snake_case_ : int = 1 return lst if __name__ == "__main__": __A : Optional[int] = input('Enter numbers separated by a comma:\n').strip() __A : int = [int(item) for item in user_input.split(',')] print(gnome_sort(unsorted))
8
0
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True) os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True) os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True) def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if hor == 1_28: snake_case_ : Tuple = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") snake_case_ : Tuple = (32, 1_28, 2_56) snake_case_ : Dict = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 32: snake_case_ : str = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") snake_case_ : Union[str, Any] = (32, 64, 1_28, 2_56) snake_case_ : str = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") snake_case_ : Dict = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' ) snake_case_ : str = model.state_dict() snake_case_ : int = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 14, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 6_55_36, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } snake_case_ : Optional[Any] = UNetaDModel(**lowerCamelCase_ ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) snake_case_ : str = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): snake_case_ : List[Any] = state_dict.pop(lowerCamelCase_ ) hf_value_function.load_state_dict(lowerCamelCase_ ) torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' ) with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : str = { """in_channels""": 14, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (32, 64, 1_28, 2_56), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 6_55_36, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } snake_case_ : int = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" ) snake_case_ : int = model snake_case_ : Dict = UNetaDModel(**lowerCamelCase_ ) print(F'''length of state dict: {len(state_dict.keys() )}''' ) print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' ) snake_case_ : Optional[int] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) ) for k, v in mapping.items(): snake_case_ : List[str] = state_dict.pop(lowerCamelCase_ ) hf_value_function.load_state_dict(lowerCamelCase_ ) torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" ) with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": unet(32) # unet(128) value_function()
363
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int]=1_2 ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Union[str, Any]=3_2 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :Tuple=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Any=0 ,_UpperCamelCase :str=None ,): snake_case_ : str = parent snake_case_ : int = batch_size snake_case_ : Union[str, Any] = seq_length snake_case_ : List[Any] = is_training snake_case_ : Union[str, Any] = use_input_mask snake_case_ : List[str] = use_labels snake_case_ : int = vocab_size snake_case_ : Any = hidden_size snake_case_ : List[Any] = projection_dim snake_case_ : Dict = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : int = dropout snake_case_ : int = attention_dropout snake_case_ : Dict = max_position_embeddings snake_case_ : Union[str, Any] = initializer_range snake_case_ : Dict = scope snake_case_ : Union[str, Any] = bos_token_id def a__ ( self :Any ): snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case_ : Union[str, Any] = None if self.use_input_mask: snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: snake_case_ : int = input_mask.numpy() snake_case_ , snake_case_ : Tuple = input_mask.shape snake_case_ : Any = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCamelCase ): snake_case_ : Optional[int] = 1 snake_case_ : List[str] = 0 snake_case_ : Tuple = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCamelCase ) def a__ ( self :str ): return BlipTextConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,) def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ): snake_case_ : List[str] = TFBlipTextModel(config=_UpperCamelCase ) snake_case_ : List[Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,training=_UpperCamelCase ) snake_case_ : Any = model(_UpperCamelCase ,training=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def a__ ( self :List[str] ): snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : str = config_and_inputs snake_case_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else () lowercase : int = False lowercase : List[Any] = False lowercase : Dict = False def a__ ( self :List[Any] ): snake_case_ : List[str] = BlipTextModelTester(self ) snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 ) def a__ ( self :Union[str, Any] ): self.config_tester.run_common_tests() def a__ ( self :Union[str, Any] ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :Tuple ): pass def a__ ( self :Tuple ): pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def a__ ( self :Any ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :Tuple ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :List[Any] ): pass @slow def a__ ( self :Any ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[Any] = TFBlipTextModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def a__ ( self :Dict ,_UpperCamelCase :Tuple=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
8
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : int = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = ['XGLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = ['XGLMTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XGLMForCausalLM', 'XGLMModel', 'XGLMPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = [ 'FlaxXGLMForCausalLM', 'FlaxXGLMModel', 'FlaxXGLMPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXGLMForCausalLM', 'TFXGLMModel', 'TFXGLMPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys __A : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure)
364
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : int = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
8
0
'''simple docstring''' import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class __UpperCamelCase ( unittest.TestCase ): @slow def a__ ( self :Dict ): snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" ) snake_case_ : int = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" ) model.to(_UpperCamelCase ) from datasets import load_dataset snake_case_ : Union[str, Any] = load_dataset("""nielsr/rvlcdip-demo""" ) snake_case_ : int = dataset["""train"""][0]["""image"""].convert("""RGB""" ) snake_case_ : Any = image_processor(_UpperCamelCase ,return_tensors="""pt""" ).to(_UpperCamelCase ) # forward pass with torch.no_grad(): snake_case_ : List[str] = model(**_UpperCamelCase ) snake_case_ : List[Any] = outputs.logits snake_case_ : Any = torch.Size((1, 1_6) ) self.assertEqual(logits.shape ,_UpperCamelCase ) snake_case_ : List[str] = torch.tensor( [-0.41_58, -0.40_92, -0.43_47] ,device=_UpperCamelCase ,dtype=torch.float ,) self.assertTrue(torch.allclose(logits[0, :3] ,_UpperCamelCase ,atol=1E-4 ) )
365
'''simple docstring''' import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __A : Optional[int] = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): def __init__( self :List[str] ,*_UpperCamelCase :str ,**_UpperCamelCase :Optional[int] ): warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" ,_UpperCamelCase ,) super().__init__(*_UpperCamelCase ,**_UpperCamelCase )
8
0
'''simple docstring''' from statistics import mean import numpy as np def UpperCAmelCase ( lowerCamelCase_ :list , lowerCamelCase_ :list , lowerCamelCase_ :list , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : Optional[Any] = 0 # Number of processes finished snake_case_ : Dict = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. snake_case_ : List[str] = [0] * no_of_process # List to include calculation results snake_case_ : int = [0] * no_of_process # Sort by arrival time. snake_case_ : Any = [burst_time[i] for i in np.argsort(lowerCamelCase_ )] snake_case_ : Tuple = [process_name[i] for i in np.argsort(lowerCamelCase_ )] arrival_time.sort() while no_of_process > finished_process_count: snake_case_ : Tuple = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: snake_case_ : List[str] = arrival_time[i] snake_case_ : Optional[int] = 0 # Index showing the location of the process being performed snake_case_ : Union[str, Any] = 0 # Saves the current response ratio. snake_case_ : Union[str, Any] = 0 for i in range(0 , lowerCamelCase_ ): if finished_process[i] == 0 and arrival_time[i] <= current_time: snake_case_ : Any = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: snake_case_ : Tuple = temp snake_case_ : Optional[Any] = i # Calculate the turn around time snake_case_ : List[Any] = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. snake_case_ : Any = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def UpperCAmelCase ( lowerCamelCase_ :list , lowerCamelCase_ :list , lowerCamelCase_ :list , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : Union[str, Any] = [0] * no_of_process for i in range(0 , lowerCamelCase_ ): snake_case_ : Any = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": __A : Tuple = 5 __A : Dict = ['A', 'B', 'C', 'D', 'E'] __A : Dict = [1, 2, 3, 4, 5] __A : Dict = [1, 2, 3, 4, 5] __A : str = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) __A : int = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time') for i in range(0, no_of_process): print( F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t' F'{turn_around_time[i]}\t\t\t{waiting_time[i]}' ) print(F'average waiting time : {mean(waiting_time):.5f}') print(F'average turn around time : {mean(turn_around_time):.5f}')
366
'''simple docstring''' import re def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[Any] = re.compile( R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" ) return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) ) if __name__ == "__main__": __A : int = '0094702343221' print(is_sri_lankan_phone_number(phone))
8
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A : List[str] = logging.get_logger(__name__) __A : Optional[Any] = { 'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json', # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class __UpperCamelCase ( lowercase__ ): lowercase : Optional[Any] = 'biogpt' def __init__( self :List[str] ,_UpperCamelCase :Any=4_2_3_8_4 ,_UpperCamelCase :int=1_0_2_4 ,_UpperCamelCase :List[str]=2_4 ,_UpperCamelCase :Tuple=1_6 ,_UpperCamelCase :str=4_0_9_6 ,_UpperCamelCase :int="gelu" ,_UpperCamelCase :Optional[Any]=0.1 ,_UpperCamelCase :str=0.1 ,_UpperCamelCase :int=1_0_2_4 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :List[str]=1E-1_2 ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :int=True ,_UpperCamelCase :Optional[Any]=0.0 ,_UpperCamelCase :int=0.0 ,_UpperCamelCase :str=1 ,_UpperCamelCase :Tuple=0 ,_UpperCamelCase :Optional[Any]=2 ,**_UpperCamelCase :Dict ,): snake_case_ : Tuple = vocab_size snake_case_ : Dict = max_position_embeddings snake_case_ : int = hidden_size snake_case_ : List[Any] = num_hidden_layers snake_case_ : Any = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : Optional[int] = hidden_act snake_case_ : Optional[Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : List[str] = initializer_range snake_case_ : Optional[Any] = layer_norm_eps snake_case_ : Optional[Any] = scale_embedding snake_case_ : Union[str, Any] = use_cache snake_case_ : List[str] = layerdrop snake_case_ : Tuple = activation_dropout super().__init__(pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,**_UpperCamelCase )
367
'''simple docstring''' from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class __UpperCamelCase ( lowercase__ ): lowercase : Union[List[PIL.Image.Image], np.ndarray] lowercase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
8
0
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def a__ ( self :Dict ): snake_case_ : Optional[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) snake_case_ : Optional[int] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : Tuple = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Dict = torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : Tuple = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) ) @slow def a__ ( self :Union[str, Any] ): snake_case_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" ) snake_case_ : Dict = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : List[Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Any = torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : str = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )
368
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): lowercase : Dict = StableDiffusionInpaintPipeline lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase : Dict = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase : Optional[int] = frozenset([] ) def a__ ( self :Any ): torch.manual_seed(0 ) snake_case_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,) snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase ) torch.manual_seed(0 ) snake_case_ : List[str] = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,) torch.manual_seed(0 ) snake_case_ : Optional[int] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,) snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase ) snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0] snake_case_ : List[str] = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) ) snake_case_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) ) if str(_UpperCamelCase ).startswith("""mps""" ): snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase ) else: snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) snake_case_ : int = { """prompt""": """A painting of a squirrel eating a burger""", """image""": init_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def a__ ( self :Any ): snake_case_ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ : Optional[Any] = self.get_dummy_components() snake_case_ : Dict = StableDiffusionInpaintPipeline(**_UpperCamelCase ) snake_case_ : List[str] = sd_pipe.to(_UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : Union[str, Any] = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : Tuple = sd_pipe(**_UpperCamelCase ).images snake_case_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) snake_case_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ ( self :Any ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def a__ ( self :List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self :Tuple ): snake_case_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench.npy""" ) snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase ,safety_checker=_UpperCamelCase ) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[str] = torch.manual_seed(0 ) snake_case_ : Dict = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : Union[str, Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9E-3 def a__ ( self :Tuple ): snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" ) snake_case_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : List[str] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,torch_dtype=torch.floataa ,safety_checker=_UpperCamelCase ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[Any] = torch.manual_seed(0 ) snake_case_ : Any = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : List[str] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5E-1 def a__ ( self :Union[str, Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Dict = PNDMScheduler.from_pretrained(_UpperCamelCase ,subfolder="""scheduler""" ) snake_case_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,safety_checker=_UpperCamelCase ,scheduler=_UpperCamelCase ,torch_dtype=torch.floataa ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : Optional[int] = torch.manual_seed(0 ) snake_case_ : Tuple = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=2 ,output_type="""np""" ,) snake_case_ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
8
0
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :list ): '''simple docstring''' if len(lowerCamelCase_ ) <= 1: return lst snake_case_ : Union[str, Any] = 1 while i < len(lowerCamelCase_ ): if lst[i - 1] <= lst[i]: i += 1 else: snake_case_ : Union[str, Any] = lst[i], lst[i - 1] i -= 1 if i == 0: snake_case_ : int = 1 return lst if __name__ == "__main__": __A : Optional[int] = input('Enter numbers separated by a comma:\n').strip() __A : int = [int(item) for item in user_input.split(',')] print(gnome_sort(unsorted))
369
'''simple docstring''' import collections import os import re from pathlib import Path __A : Dict = 'src/transformers' # Matches is_xxx_available() __A : Dict = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __A : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __A : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __A : Optional[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __A : Optional[int] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __A : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __A : Union[str, Any] = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __A : int = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __A : int = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __A : List[Any] = re.compile(r'^\s*try:') # Catches a line with else: __A : Any = re.compile(r'^\s*else:') def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' if _re_test_backend.search(lowerCamelCase_ ) is None: return None snake_case_ : Tuple = [b[0] for b in _re_backend.findall(lowerCamelCase_ )] backends.sort() return "_and_".join(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ): '''simple docstring''' with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ : str = f.readlines() snake_case_ : List[Any] = 0 while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowerCamelCase_ ): return None # First grab the objects without a specific backend in _import_structure snake_case_ : Union[str, Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: snake_case_ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowerCamelCase_ ): snake_case_ : Optional[int] = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0] snake_case_ : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" , lowerCamelCase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue snake_case_ : Any = _re_import_struct_key_value.search(lowerCamelCase_ ) if single_line_import_search is not None: snake_case_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 snake_case_ : Union[str, Any] = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. snake_case_ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): snake_case_ : List[Any] = lines[line_index] if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None: objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] ) elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None: snake_case_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_between_brackets.search(lowerCamelCase_ ) is not None: snake_case_ : List[str] = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_quote_object.search(lowerCamelCase_ ) is not None: objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case_ : List[Any] = [] while ( line_index < len(lowerCamelCase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): snake_case_ : Union[str, Any] = lines[line_index] snake_case_ : Union[str, Any] = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case_ : Dict = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(lowerCamelCase_ ): # If the line is an if is_backend_available, we grab all objects associated. snake_case_ : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : str = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): snake_case_ : Dict = lines[line_index] snake_case_ : Any = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[str] ): '''simple docstring''' def find_duplicates(lowerCamelCase_ :Union[str, Any] ): return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case_ : Optional[int] = [] for key in import_dict_objects.keys(): snake_case_ : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) snake_case_ : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case_ : str = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Tuple = [] for root, _, files in os.walk(lowerCamelCase_ ): if "__init__.py" in files: snake_case_ : Any = os.path.join(lowerCamelCase_ , """__init__.py""" ) snake_case_ : Dict = parse_init(lowerCamelCase_ ) if objects is not None: snake_case_ : Any = analyze_results(*lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: snake_case_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(lowerCamelCase_ ) ) if len(lowerCamelCase_ ) > 0: raise ValueError("""\n\n""".join(lowerCamelCase_ ) ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Union[str, Any] = [] for path, directories, files in os.walk(lowerCamelCase_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(lowerCamelCase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowerCamelCase_ ) / folder).glob("""*.py""" ) ) ) == 0: continue snake_case_ : Tuple = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(os.path.sep , """.""" ) submodules.append(lowerCamelCase_ ) for fname in files: if fname == "__init__.py": continue snake_case_ : Dict = str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(lowerCamelCase_ ) return submodules __A : List[Any] = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def UpperCAmelCase ( ): '''simple docstring''' # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import snake_case_ : Union[str, Any] = direct_transformers_import(lowerCamelCase_ ) snake_case_ : List[str] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(lowerCamelCase_ , """__init__.py""" ) , """r""" ) as f: snake_case_ : str = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase_ ) ) ) snake_case_ : Dict = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowerCamelCase_ ) > 0: snake_case_ : str = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
8
0
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __A : str = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __A : Optional[int] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __A : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) __A : Optional[int] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) __A : Optional[Any] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions __A : str = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) __A : Any = tf.keras.preprocessing.image.img_to_array(test_image) __A : int = np.expand_dims(test_image, axis=0) __A : Union[str, Any] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __A : Optional[Any] = 'Normal' if result[0][0] == 1: __A : List[str] = 'Abnormality detected'
370
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self :List[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Union[str, Any]=3 ,_UpperCamelCase :Any=1_8 ,_UpperCamelCase :Optional[Any]=3_0 ,_UpperCamelCase :List[str]=4_0_0 ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :List[Any]=True ,): snake_case_ : List[str] = size if size is not None else {"""height""": 1_8, """width""": 1_8} snake_case_ : Union[str, Any] = parent snake_case_ : str = batch_size snake_case_ : List[Any] = num_channels snake_case_ : Tuple = image_size snake_case_ : int = min_resolution snake_case_ : int = max_resolution snake_case_ : Union[str, Any] = do_resize snake_case_ : Optional[Any] = size snake_case_ : Any = apply_ocr def a__ ( self :Union[str, Any] ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Tuple = LayoutLMvaImageProcessor if is_pytesseract_available() else None def a__ ( self :List[Any] ): snake_case_ : Union[str, Any] = LayoutLMvaImageProcessingTester(self ) @property def a__ ( self :int ): return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self :Any ): snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""size""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""apply_ocr""" ) ) def a__ ( self :int ): snake_case_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 1_8, """width""": 1_8} ) snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ) self.assertEqual(image_processor.size ,{"""height""": 4_2, """width""": 4_2} ) def a__ ( self :Optional[Any] ): pass def a__ ( self :Union[str, Any] ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,Image.Image ) # Test not batched input snake_case_ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) self.assertIsInstance(encoding.words ,_UpperCamelCase ) self.assertIsInstance(encoding.boxes ,_UpperCamelCase ) # Test batched snake_case_ : List[Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Tuple ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,numpify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,np.ndarray ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Any = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Optional[Any] ): # Initialize image_processing snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,torchify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,torch.Tensor ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Union[str, Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :List[Any] ): # with apply_OCR = True snake_case_ : Any = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case_ : List[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" ) snake_case_ : str = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) snake_case_ : Dict = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 snake_case_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words ,_UpperCamelCase ) self.assertListEqual(encoding.boxes ,_UpperCamelCase ) # with apply_OCR = False snake_case_ : Dict = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase ) snake_case_ : Optional[int] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
8
0
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : str = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
371
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : List[Any] = generate_pascal_triangle(lowerCamelCase_ ) for row_idx in range(lowerCamelCase_ ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=""" """ ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=""" """ ) else: print(triangle[row_idx][col_idx] , end="""""" ) print() def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [] for current_row_idx in range(lowerCamelCase_ ): snake_case_ : List[str] = populate_current_row(lowerCamelCase_ , lowerCamelCase_ ) triangle.append(lowerCamelCase_ ) return triangle def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : Union[str, Any] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 snake_case_ , snake_case_ : Optional[Any] = 1, 1 for current_col_idx in range(1 , lowerCamelCase_ ): calculate_current_element( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) return current_row def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :list[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ): '''simple docstring''' snake_case_ : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx - 1] snake_case_ : List[Any] = triangle[current_row_idx - 1][current_col_idx] snake_case_ : Optional[int] = above_to_left_elt + above_to_right_elt def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [[1]] for row_index in range(1 , lowerCamelCase_ ): snake_case_ : Optional[Any] = [0] + result[-1] + [0] snake_case_ : Dict = row_index + 1 # Calculate the number of distinct elements in a row snake_case_ : Any = sum(divmod(lowerCamelCase_ , 2 ) ) snake_case_ : Tuple = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] snake_case_ : Optional[int] = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() snake_case_ : str = row_first_half + row_second_half result.append(lowerCamelCase_ ) return result def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowerCamelCase_ :Callable , lowerCamelCase_ :int ) -> None: snake_case_ : Dict = F'''{func.__name__}({value})''' snake_case_ : Dict = timeit(F'''__main__.{call}''' , setup="""import __main__""" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'''{call:38} -- {timing:.4f} seconds''' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(lowerCamelCase_ , lowerCamelCase_ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
8
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Optional[int] = { 'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'GIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys __A : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
350
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def a__ ( self :Dict ): snake_case_ : Optional[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) snake_case_ : Optional[int] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : Tuple = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Dict = torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : Tuple = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) ) @slow def a__ ( self :Union[str, Any] ): snake_case_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" ) snake_case_ : Dict = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : List[Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Any = torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : str = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )
8
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : Any = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ['MBartTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = ['MBartTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'MBART_PRETRAINED_MODEL_ARCHIVE_LIST', 'MBartForCausalLM', 'MBartForConditionalGeneration', 'MBartForQuestionAnswering', 'MBartForSequenceClassification', 'MBartModel', 'MBartPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ 'TFMBartForConditionalGeneration', 'TFMBartModel', 'TFMBartPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ 'FlaxMBartForConditionalGeneration', 'FlaxMBartForQuestionAnswering', 'FlaxMBartForSequenceClassification', 'FlaxMBartModel', 'FlaxMBartPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys __A : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
351
'''simple docstring''' from __future__ import annotations from collections.abc import Callable def UpperCAmelCase ( lowerCamelCase_ :Callable[[int | float], int | float] , lowerCamelCase_ :int | float , lowerCamelCase_ :int | float , lowerCamelCase_ :int = 1_00 , ): '''simple docstring''' snake_case_ : Tuple = x_start snake_case_ : Optional[int] = fnc(lowerCamelCase_ ) snake_case_ : Optional[int] = 0.0 for _ in range(lowerCamelCase_ ): # Approximates small segments of curve as linear and solve # for trapezoidal area snake_case_ : int = (x_end - x_start) / steps + xa snake_case_ : Union[str, Any] = fnc(lowerCamelCase_ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step snake_case_ : Any = xa snake_case_ : str = fxa return area if __name__ == "__main__": def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' return x**3 + x**2 print('f(x) = x^3 + x^2') print('The area between the curve, x = -5, x = 5 and the x axis is:') __A : List[str] = 10 while i <= 100_000: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 10
8
0
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType __A : Optional[Any] = logging.get_logger(__name__) __A : Tuple = { 'openai/imagegpt-small': '', 'openai/imagegpt-medium': '', 'openai/imagegpt-large': '', } class __UpperCamelCase ( lowercase__ ): lowercase : Any = 'imagegpt' lowercase : List[str] = ['past_key_values'] lowercase : str = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self :Tuple ,_UpperCamelCase :Optional[int]=5_1_2 + 1 ,_UpperCamelCase :Tuple=3_2 * 3_2 ,_UpperCamelCase :Dict=5_1_2 ,_UpperCamelCase :List[Any]=2_4 ,_UpperCamelCase :Any=8 ,_UpperCamelCase :int=None ,_UpperCamelCase :int="quick_gelu" ,_UpperCamelCase :List[str]=0.1 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Optional[Any]=0.1 ,_UpperCamelCase :Any=1E-5 ,_UpperCamelCase :Optional[int]=0.02 ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :List[str]=True ,_UpperCamelCase :Dict=False ,_UpperCamelCase :int=False ,_UpperCamelCase :Optional[int]=False ,**_UpperCamelCase :Any ,): snake_case_ : List[str] = vocab_size snake_case_ : Optional[int] = n_positions snake_case_ : Any = n_embd snake_case_ : Optional[int] = n_layer snake_case_ : Optional[int] = n_head snake_case_ : Tuple = n_inner snake_case_ : List[str] = activation_function snake_case_ : str = resid_pdrop snake_case_ : Optional[int] = embd_pdrop snake_case_ : Optional[Any] = attn_pdrop snake_case_ : Tuple = layer_norm_epsilon snake_case_ : Optional[Any] = initializer_range snake_case_ : List[Any] = scale_attn_weights snake_case_ : Tuple = use_cache snake_case_ : Dict = scale_attn_by_inverse_layer_idx snake_case_ : List[str] = reorder_and_upcast_attn snake_case_ : str = tie_word_embeddings super().__init__(tie_word_embeddings=_UpperCamelCase ,**_UpperCamelCase ) class __UpperCamelCase ( lowercase__ ): @property def a__ ( self :int ): return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ] ) def a__ ( self :Tuple ,_UpperCamelCase :"FeatureExtractionMixin" ,_UpperCamelCase :int = 1 ,_UpperCamelCase :int = -1 ,_UpperCamelCase :bool = False ,_UpperCamelCase :Optional["TensorType"] = None ,_UpperCamelCase :int = 3 ,_UpperCamelCase :int = 3_2 ,_UpperCamelCase :int = 3_2 ,): snake_case_ : Dict = self._generate_dummy_images(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) snake_case_ : List[str] = dict(preprocessor(images=_UpperCamelCase ,return_tensors=_UpperCamelCase ) ) return inputs
352
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) __A : int = logging.getLogger() def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[Any] = argparse.ArgumentParser() parser.add_argument("""-f""" ) snake_case_ : int = parser.parse_args() return args.f def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[Any] = {} snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """all_results.json""" ) if os.path.exists(lowerCamelCase_ ): with open(lowerCamelCase_ , """r""" ) as f: snake_case_ : str = json.load(lowerCamelCase_ ) else: raise ValueError(F'''can\'t find {path}''' ) return results def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[str] = torch.cuda.is_available() and torch_device == """cuda""" return is_using_cuda and is_apex_available() __A : Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __UpperCamelCase ( lowercase__ ): @classmethod def a__ ( cls :Dict ): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU snake_case_ : Optional[int] = tempfile.mkdtemp() snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" ) write_basic_config(save_location=cls.configPath ) snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def a__ ( cls :int ): shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Optional[int] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : Dict = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2 snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : str = F''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertLess(result["""train_loss"""] ,0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) ) @unittest.skip(reason="""Fix me @muellerzr""" ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[str] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : Optional[int] = F''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result["""eval_f1"""] ,2_8 ) self.assertGreaterEqual(result["""eval_exact"""] ,2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Union[str, Any] = F''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Union[str, Any] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[Any] = F''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : int = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 ) self.assertGreaterEqual(result["""eval_rouge2"""] ,2 ) self.assertGreaterEqual(result["""eval_rougeL"""] ,7 ) self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : Tuple = self.get_auto_remove_tmp_dir() snake_case_ : Optional[Any] = F''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Any = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) ) @slow def a__ ( self :Optional[Any] ): snake_case_ : List[str] = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCamelCase ) snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Any ): snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # The base model scores a 25% self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""step_1""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""image_classification_no_trainer""" ) ) )
8
0
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def UpperCAmelCase ( lowerCamelCase_ :List[str] ): '''simple docstring''' snake_case_ : List[Any] = SwinConfig() snake_case_ : Union[str, Any] = swin_name.split("""_""" ) snake_case_ : Tuple = name_split[1] snake_case_ : int = int(name_split[4] ) snake_case_ : Optional[Any] = int(name_split[3][-1] ) if model_size == "tiny": snake_case_ : Optional[Any] = 96 snake_case_ : Tuple = (2, 2, 6, 2) snake_case_ : str = (3, 6, 12, 24) elif model_size == "small": snake_case_ : Optional[Any] = 96 snake_case_ : Optional[Any] = (2, 2, 18, 2) snake_case_ : Any = (3, 6, 12, 24) elif model_size == "base": snake_case_ : List[Any] = 1_28 snake_case_ : str = (2, 2, 18, 2) snake_case_ : List[str] = (4, 8, 16, 32) else: snake_case_ : Union[str, Any] = 1_92 snake_case_ : Optional[int] = (2, 2, 18, 2) snake_case_ : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: snake_case_ : List[str] = 2_18_41 else: snake_case_ : Any = 10_00 snake_case_ : Optional[Any] = """huggingface/label-files""" snake_case_ : Optional[Any] = """imagenet-1k-id2label.json""" snake_case_ : Tuple = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) snake_case_ : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} snake_case_ : Dict = idalabel snake_case_ : int = {v: k for k, v in idalabel.items()} snake_case_ : Union[str, Any] = img_size snake_case_ : Union[str, Any] = num_classes snake_case_ : List[Any] = embed_dim snake_case_ : str = depths snake_case_ : int = num_heads snake_case_ : List[str] = window_size return config def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' if "patch_embed.proj" in name: snake_case_ : Union[str, Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: snake_case_ : Tuple = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: snake_case_ : str = """encoder.""" + name if "attn.proj" in name: snake_case_ : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: snake_case_ : Tuple = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: snake_case_ : Dict = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: snake_case_ : Dict = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: snake_case_ : Optional[int] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: snake_case_ : int = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "norm.weight": snake_case_ : List[str] = """layernorm.weight""" if name == "norm.bias": snake_case_ : Any = """layernorm.bias""" if "head" in name: snake_case_ : List[Any] = name.replace("""head""" , """classifier""" ) else: snake_case_ : str = """swin.""" + name return name def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :str ): '''simple docstring''' for key in orig_state_dict.copy().keys(): snake_case_ : Optional[int] = orig_state_dict.pop(lowerCamelCase_ ) if "mask" in key: continue elif "qkv" in key: snake_case_ : Union[str, Any] = key.split(""".""" ) snake_case_ : str = int(key_split[1] ) snake_case_ : Optional[int] = int(key_split[3] ) snake_case_ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: snake_case_ : List[Any] = val[:dim, :] snake_case_ : List[str] = val[ dim : dim * 2, : ] snake_case_ : int = val[-dim:, :] else: snake_case_ : Dict = val[ :dim ] snake_case_ : List[str] = val[ dim : dim * 2 ] snake_case_ : str = val[ -dim: ] else: snake_case_ : Optional[Any] = val return orig_state_dict def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ): '''simple docstring''' snake_case_ : int = timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_ ) timm_model.eval() snake_case_ : Any = get_swin_config(lowerCamelCase_ ) snake_case_ : Any = SwinForImageClassification(lowerCamelCase_ ) model.eval() snake_case_ : Dict = convert_state_dict(timm_model.state_dict() , lowerCamelCase_ ) model.load_state_dict(lowerCamelCase_ ) snake_case_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ : str = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) ) snake_case_ : int = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ) snake_case_ : Any = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ) snake_case_ : List[str] = timm_model(inputs["""pixel_values"""] ) snake_case_ : List[str] = model(**lowerCamelCase_ ).logits assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCamelCase_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) __A : int = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
353
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __A : Tuple = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): lowercase : str = ['input_values', 'padding_mask'] def __init__( self :Optional[int] ,_UpperCamelCase :int = 1 ,_UpperCamelCase :int = 2_4_0_0_0 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :float = None ,_UpperCamelCase :float = None ,**_UpperCamelCase :List[Any] ,): super().__init__(feature_size=_UpperCamelCase ,sampling_rate=_UpperCamelCase ,padding_value=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Dict = chunk_length_s snake_case_ : str = overlap @property def a__ ( self :Any ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def a__ ( self :List[str] ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self :Optional[Any] ,_UpperCamelCase :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCamelCase :Optional[Union[bool, str, PaddingStrategy]] = None ,_UpperCamelCase :Optional[bool] = False ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :Optional[Union[str, TensorType]] = None ,_UpperCamelCase :Optional[int] = None ,): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if padding and truncation: raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" ) elif padding is None: # by default let's pad the inputs snake_case_ : Tuple = True snake_case_ : str = bool( isinstance(_UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: snake_case_ : Any = [np.asarray(_UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_UpperCamelCase ,np.ndarray ): snake_case_ : Optional[int] = np.asarray(_UpperCamelCase ,dtype=np.floataa ) elif isinstance(_UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): snake_case_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: snake_case_ : Optional[Any] = [np.asarray(_UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(_UpperCamelCase ): if example.ndim > 2: raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' ) snake_case_ : Tuple = None snake_case_ : Optional[Any] = BatchFeature({"""input_values""": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: snake_case_ : Union[str, Any] = min(array.shape[0] for array in raw_audio ) snake_case_ : Dict = int(np.floor(max_length / self.chunk_stride ) ) snake_case_ : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: snake_case_ : Any = max(array.shape[0] for array in raw_audio ) snake_case_ : List[Any] = int(np.ceil(max_length / self.chunk_stride ) ) snake_case_ : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length snake_case_ : Union[str, Any] = """max_length""" else: snake_case_ : int = input_values # normal padding on batch if padded_inputs is None: snake_case_ : Optional[int] = self.pad( _UpperCamelCase ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ,padding=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,) if padding: snake_case_ : Tuple = padded_inputs.pop("""attention_mask""" ) snake_case_ : Optional[int] = [] for example in padded_inputs.pop("""input_values""" ): if self.feature_size == 1: snake_case_ : Dict = example[..., None] input_values.append(example.T ) snake_case_ : List[Any] = input_values if return_tensors is not None: snake_case_ : Tuple = padded_inputs.convert_to_tensors(_UpperCamelCase ) return padded_inputs
8
0
'''simple docstring''' import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate __A : Any = trt.Logger(trt.Logger.WARNING) __A : List[Any] = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) __A : str = logging.getLogger(__name__) __A : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--onnx_model_path', default=None, type=str, required=True, help='Path to ONNX model: ', ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='The output directory where the model checkpoints and predictions will be written.', ) # Other parameters parser.add_argument( '--tokenizer_name', default='', type=str, required=True, help='Pretrained tokenizer name or path if not the same as model_name', ) parser.add_argument( '--version_2_with_negative', action='store_true', help='If true, the SQuAD examples contain some that do not have an answer.', ) parser.add_argument( '--null_score_diff_threshold', type=float, default=0.0, help='If null_score - best_non_null is greater than the threshold predict null.', ) parser.add_argument( '--max_seq_length', default=384, type=int, help=( 'The maximum total input sequence length after WordPiece tokenization. Sequences ' 'longer than this will be truncated, and sequences shorter than this will be padded.' ), ) parser.add_argument( '--doc_stride', default=128, type=int, help='When splitting up a long document into chunks, how much stride to take between chunks.', ) parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.') parser.add_argument( '--n_best_size', default=20, type=int, help='The total number of n-best predictions to generate in the nbest_predictions.json output file.', ) parser.add_argument( '--max_answer_length', default=30, type=int, help=( 'The maximum length of an answer that can be generated. This is needed because the start ' 'and end predictions are not conditioned on one another.' ), ) parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument( '--dataset_name', type=str, default=None, required=True, help='The name of the dataset to use (via the datasets library).', ) parser.add_argument( '--dataset_config_name', type=str, default=None, help='The configuration name of the dataset to use (via the datasets library).', ) parser.add_argument( '--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.' ) parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets') parser.add_argument( '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision instead of 32-bit', ) parser.add_argument( '--int8', action='store_true', help='Whether to use INT8', ) __A : List[str] = parser.parse_args() if args.tokenizer_name: __A : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported by this script.' 'You can do it from another script, save it, and load it from here, using --tokenizer_name.' ) logger.info('Training/evaluation parameters %s', args) __A : Union[str, Any] = args.per_device_eval_batch_size __A : Any = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties __A : List[Any] = True __A : Dict = 'temp_engine/bert-fp32.engine' if args.fpaa: __A : Any = 'temp_engine/bert-fp16.engine' if args.inta: __A : Union[str, Any] = 'temp_engine/bert-int8.engine' # import ONNX file if not os.path.exists('temp_engine'): os.makedirs('temp_engine') __A : List[str] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, 'rb') as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network __A : int = [network.get_input(i) for i in range(network.num_inputs)] __A : Optional[Any] = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: __A : Tuple = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) __A : List[Any] = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) __A : str = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, 'wb') as f: f.write(engine.serialize()) def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] ): '''simple docstring''' snake_case_ : Tuple = np.asarray(inputs["""input_ids"""] , dtype=np.intaa ) snake_case_ : List[str] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa ) snake_case_ : Optional[Any] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase_ ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase_ ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase_ ) # start time snake_case_ : int = time.time() # Run inference context.execute_async( bindings=[int(lowerCamelCase_ ) for d_inp in d_inputs] + [int(lowerCamelCase_ ), int(lowerCamelCase_ )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) cuda.memcpy_dtoh_async(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Synchronize the stream and take time stream.synchronize() # end time snake_case_ : Dict = time.time() snake_case_ : List[Any] = end_time - start_time snake_case_ : Tuple = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. __A : int = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. __A : Dict = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError('Evaluation requires a dataset name') # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. __A : str = raw_datasets['validation'].column_names __A : Optional[Any] = 'question' if 'question' in column_names else column_names[0] __A : Any = 'context' if 'context' in column_names else column_names[1] __A : str = 'answers' if 'answers' in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). __A : Optional[Any] = tokenizer.padding_side == 'right' if args.max_seq_length > tokenizer.model_max_length: logger.warning( F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the' F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) __A : List[Any] = min(args.max_seq_length, tokenizer.model_max_length) def UpperCAmelCase ( lowerCamelCase_ :Tuple ): '''simple docstring''' snake_case_ : Union[str, Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. snake_case_ : List[str] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase_ , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , padding="""max_length""" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. snake_case_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. snake_case_ : Dict = [] for i in range(len(tokenized_examples["""input_ids"""] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). snake_case_ : int = tokenized_examples.sequence_ids(lowerCamelCase_ ) snake_case_ : Any = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. snake_case_ : Union[str, Any] = sample_mapping[i] tokenized_examples["example_id"].append(examples["""id"""][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. snake_case_ : Optional[int] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] ) ] return tokenized_examples __A : List[Any] = raw_datasets['validation'] # Validation Feature Creation __A : Tuple = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc='Running tokenizer on validation dataset', ) __A : Dict = default_data_collator __A : List[str] = eval_dataset.remove_columns(['example_id', 'offset_mapping']) __A : Optional[int] = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def UpperCAmelCase ( lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :Any="eval" ): '''simple docstring''' snake_case_ : int = postprocess_qa_predictions( examples=lowerCamelCase_ , features=lowerCamelCase_ , predictions=lowerCamelCase_ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase_ , ) # Format the result to the format the metric expects. if args.version_2_with_negative: snake_case_ : Any = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: snake_case_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] snake_case_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=lowerCamelCase_ , label_ids=lowerCamelCase_ ) __A : Any = load_metric('squad_v2' if args.version_2_with_negative else 'squad') # Evaluation! logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path) with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] ): '''simple docstring''' return trt.volume(engine.get_binding_shape(lowerCamelCase_ ) ) * engine.get_binding_dtype(lowerCamelCase_ ).itemsize # Allocate device memory for inputs and outputs. __A : Optional[Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer __A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) __A : Union[str, Any] = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) __A : Optional[int] = cuda.mem_alloc(h_outputa.nbytes) __A : Union[str, Any] = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. __A : Any = cuda.Stream() # Evaluation logger.info('***** Running Evaluation *****') logger.info(F' Num examples = {len(eval_dataset)}') logger.info(F' Batch size = {args.per_device_eval_batch_size}') __A : Union[str, Any] = 0.0 __A : Tuple = 0 __A : str = timeit.default_timer() __A : Any = None for step, batch in enumerate(eval_dataloader): __A : Any = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 __A : Dict = outputs __A : Union[str, Any] = torch.tensor(start_logits) __A : Optional[Any] = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered __A : Any = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) __A : List[Any] = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) __A : Optional[int] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) __A : List[Any] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: __A : str = nested_truncate(all_preds, len(eval_dataset)) __A : int = timeit.default_timer() - start_time logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter)) logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000)) logger.info('Total Number of Inference = %d', niter) __A : Any = post_processing_function(eval_examples, eval_dataset, all_preds) __A : Union[str, Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F'Evaluation metrics: {eval_metric}')
354
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __A : Dict = { 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class __UpperCamelCase ( lowercase__ ): lowercase : Optional[int] = 'ernie_m' lowercase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self :Optional[Any] ,_UpperCamelCase :int = 2_5_0_0_0_2 ,_UpperCamelCase :int = 7_6_8 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 3_0_7_2 ,_UpperCamelCase :str = "gelu" ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :int = 5_1_4 ,_UpperCamelCase :float = 0.02 ,_UpperCamelCase :int = 1 ,_UpperCamelCase :float = 1E-0_5 ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :List[str]=False ,_UpperCamelCase :Optional[int]=0.0 ,**_UpperCamelCase :List[Any] ,): super().__init__(pad_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Optional[int] = vocab_size snake_case_ : Any = hidden_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : Any = intermediate_size snake_case_ : Any = hidden_act snake_case_ : Tuple = hidden_dropout_prob snake_case_ : Union[str, Any] = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : int = initializer_range snake_case_ : Optional[Any] = layer_norm_eps snake_case_ : Union[str, Any] = classifier_dropout snake_case_ : Tuple = is_decoder snake_case_ : int = act_dropout
8
0
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Tuple , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' if height >= 1: move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) move_disk(lowerCamelCase_ , lowerCamelCase_ ) move_tower(height - 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[Any] ): '''simple docstring''' print("""moving disk from""" , lowerCamelCase_ , """to""" , lowerCamelCase_ ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : str = int(input("""Height of hanoi: """ ).strip() ) move_tower(lowerCamelCase_ , """A""" , """B""" , """C""" ) if __name__ == "__main__": main()
355
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __UpperCamelCase ( nn.Module ): def __init__( self :Any ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int=0.0 ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = True ,_UpperCamelCase :str = "layer_norm" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Any = only_cross_attention snake_case_ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero""" snake_case_ : Any = (num_embeds_ada_norm is not None) and norm_type == """ada_norm""" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: snake_case_ : Dict = AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ : str = AdaLayerNormZero(_UpperCamelCase ,_UpperCamelCase ) else: snake_case_ : List[Any] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_UpperCamelCase ,) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. snake_case_ : str = ( AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,upcast_attention=_UpperCamelCase ,) # is self-attn if encoder_hidden_states is none else: snake_case_ : Any = None snake_case_ : Optional[Any] = None # 3. Feed-forward snake_case_ : List[str] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : Union[str, Any] = FeedForward(_UpperCamelCase ,dropout=_UpperCamelCase ,activation_fn=_UpperCamelCase ,final_dropout=_UpperCamelCase ) # let chunk size default to None snake_case_ : Optional[int] = None snake_case_ : Dict = 0 def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ): # Sets chunk feed-forward snake_case_ : Optional[Any] = chunk_size snake_case_ : Optional[Any] = dim def a__ ( self :List[str] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,_UpperCamelCase :Dict[str, Any] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,): # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self.norma( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=hidden_states.dtype ) else: snake_case_ : Optional[int] = self.norma(_UpperCamelCase ) snake_case_ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {} snake_case_ : Union[str, Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_msa.unsqueeze(1 ) * attn_output snake_case_ : Union[str, Any] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: snake_case_ : Any = ( self.norma(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase ) ) snake_case_ : List[Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=_UpperCamelCase ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Tuple = attn_output + hidden_states # 3. Feed-forward snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) snake_case_ : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size snake_case_ : int = torch.cat( [self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,) else: snake_case_ : List[str] = self.ff(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output snake_case_ : Any = ff_output + hidden_states return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Dict ,_UpperCamelCase :int ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :int = 4 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Tuple = int(dim * mult ) snake_case_ : Optional[int] = dim_out if dim_out is not None else dim if activation_fn == "gelu": snake_case_ : Any = GELU(_UpperCamelCase ,_UpperCamelCase ) if activation_fn == "gelu-approximate": snake_case_ : Tuple = GELU(_UpperCamelCase ,_UpperCamelCase ,approximate="""tanh""" ) elif activation_fn == "geglu": snake_case_ : Dict = GEGLU(_UpperCamelCase ,_UpperCamelCase ) elif activation_fn == "geglu-approximate": snake_case_ : Optional[Any] = ApproximateGELU(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Dict = nn.ModuleList([] ) # project in self.net.append(_UpperCamelCase ) # project dropout self.net.append(nn.Dropout(_UpperCamelCase ) ) # project out self.net.append(nn.Linear(_UpperCamelCase ,_UpperCamelCase ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(_UpperCamelCase ) ) def a__ ( self :Tuple ,_UpperCamelCase :Union[str, Any] ): for module in self.net: snake_case_ : Tuple = module(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :str = "none" ): super().__init__() snake_case_ : Union[str, Any] = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Optional[Any] = approximate def a__ ( self :str ,_UpperCamelCase :int ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ,approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ): snake_case_ : Optional[Any] = self.proj(_UpperCamelCase ) snake_case_ : int = self.gelu(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : str = nn.Linear(_UpperCamelCase ,dim_out * 2 ) def a__ ( self :Dict ,_UpperCamelCase :List[str] ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def a__ ( self :Optional[Any] ,_UpperCamelCase :Optional[int] ): snake_case_ , snake_case_ : Dict = self.proj(_UpperCamelCase ).chunk(2 ,dim=-1 ) return hidden_states * self.gelu(_UpperCamelCase ) class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[int] ): snake_case_ : int = self.proj(_UpperCamelCase ) return x * torch.sigmoid(1.7_02 * x ) class __UpperCamelCase ( nn.Module ): def __init__( self :int ,_UpperCamelCase :str ,_UpperCamelCase :List[Any] ): super().__init__() snake_case_ : int = nn.Embedding(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Union[str, Any] = nn.SiLU() snake_case_ : Any = nn.Linear(_UpperCamelCase ,embedding_dim * 2 ) snake_case_ : Dict = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ) ) ) snake_case_ , snake_case_ : Tuple = torch.chunk(_UpperCamelCase ,2 ) snake_case_ : Tuple = self.norm(_UpperCamelCase ) * (1 + scale) + shift return x class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = CombinedTimestepLabelEmbeddings(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : int = nn.SiLU() snake_case_ : List[str] = nn.Linear(_UpperCamelCase ,6 * embedding_dim ,bias=_UpperCamelCase ) snake_case_ : str = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ,eps=1E-6 ) def a__ ( self :Union[str, Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :str=None ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=_UpperCamelCase ) ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = emb.chunk(6 ,dim=1 ) snake_case_ : str = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :Optional[str] = None ,_UpperCamelCase :float = 1E-5 ): super().__init__() snake_case_ : Optional[int] = num_groups snake_case_ : List[Any] = eps if act_fn is None: snake_case_ : int = None else: snake_case_ : Dict = get_activation(_UpperCamelCase ) snake_case_ : Optional[int] = nn.Linear(_UpperCamelCase ,out_dim * 2 ) def a__ ( self :List[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str] ): if self.act: snake_case_ : Any = self.act(_UpperCamelCase ) snake_case_ : Optional[int] = self.linear(_UpperCamelCase ) snake_case_ : Dict = emb[:, :, None, None] snake_case_ , snake_case_ : str = emb.chunk(2 ,dim=1 ) snake_case_ : str = F.group_norm(_UpperCamelCase ,self.num_groups ,eps=self.eps ) snake_case_ : List[str] = x * (1 + scale) + shift return x
8
0
'''simple docstring''' import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () __A : List[str] = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). __A : Tuple = [0, 25, 50] __A : Any = [25, 50, 75] __A : List[Any] = fuzz.membership.trimf(X, abca) __A : Optional[Any] = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. __A : Tuple = np.ones(75) __A : Dict = np.zeros((75,)) # 1. Union = max(µA(x), µB(x)) __A : List[str] = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) __A : List[str] = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) __A : Union[str, Any] = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) __A : Tuple = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] __A : int = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) __A : List[Any] = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] __A : str = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] __A : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('Young') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('Middle aged') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('union') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('intersection') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('complement_a') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('difference a/b') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('alg_sum') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('alg_product') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('bdd_sum') plt.grid(True) plt.subplot(4, 3, 10) plt.plot(X, bdd_difference) plt.title('bdd_difference') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
356
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str=True , lowerCamelCase_ :str="pt" ): '''simple docstring''' snake_case_ : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} snake_case_ : Union[str, Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Any=None , ): '''simple docstring''' snake_case_ : Dict = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __UpperCamelCase ( lowercase__ ): def __init__( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any="train" ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :int=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Optional[int]="" ,): super().__init__() snake_case_ : List[str] = Path(_UpperCamelCase ).joinpath(type_path + """.source""" ) snake_case_ : int = Path(_UpperCamelCase ).joinpath(type_path + """.target""" ) snake_case_ : Optional[int] = self.get_char_lens(self.src_file ) snake_case_ : List[str] = max_source_length snake_case_ : str = max_target_length assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}''' snake_case_ : str = tokenizer snake_case_ : str = prefix if n_obs is not None: snake_case_ : int = self.src_lens[:n_obs] snake_case_ : Tuple = src_lang snake_case_ : str = tgt_lang def __len__( self :Any ): return len(self.src_lens ) def __getitem__( self :List[str] ,_UpperCamelCase :Union[str, Any] ): snake_case_ : Optional[int] = index + 1 # linecache starts at 1 snake_case_ : Dict = self.prefix + linecache.getline(str(self.src_file ) ,_UpperCamelCase ).rstrip("""\n""" ) snake_case_ : List[Any] = linecache.getline(str(self.tgt_file ) ,_UpperCamelCase ).rstrip("""\n""" ) assert source_line, F'''empty source line for index {index}''' assert tgt_line, F'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_UpperCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right snake_case_ : int = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer ) snake_case_ : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer snake_case_ : Optional[Any] = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_source_length ,"""right""" ) snake_case_ : Tuple = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_target_length ,"""right""" ) snake_case_ : int = source_inputs["""input_ids"""].squeeze() snake_case_ : str = target_inputs["""input_ids"""].squeeze() snake_case_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( _UpperCamelCase :str ): return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()] def a__ ( self :Optional[int] ,_UpperCamelCase :List[str] ): snake_case_ : Optional[Any] = torch.stack([x["""input_ids"""] for x in batch] ) snake_case_ : List[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) snake_case_ : Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] ) snake_case_ : Optional[Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Tuple = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Optional[int] = trim_batch(_UpperCamelCase ,_UpperCamelCase ) snake_case_ , snake_case_ : Dict = trim_batch(_UpperCamelCase ,_UpperCamelCase ,attention_mask=_UpperCamelCase ) snake_case_ : Optional[int] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __A : List[Any] = getLogger(__name__) def UpperCAmelCase ( lowerCamelCase_ :List[List] ): '''simple docstring''' return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : int = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int]=4 , **lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] ): '''simple docstring''' with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Optional[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) snake_case_ : List[str] = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def UpperCAmelCase ( lowerCamelCase_ :Callable , lowerCamelCase_ :Iterable ): '''simple docstring''' return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Dict ): '''simple docstring''' def remove_articles(lowerCamelCase_ :str ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ :Optional[Any] ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ :Tuple ): snake_case_ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ :Optional[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' snake_case_ : List[Any] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : Optional[int] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : List[Any] = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) snake_case_ : Optional[Any] = sum(common.values() ) if num_same == 0: return 0 snake_case_ : Optional[Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Union[str, Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] ): '''simple docstring''' assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) snake_case_ : Optional[int] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' return model_prefix.startswith("""rag""" ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead snake_case_ : Optional[int] = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue snake_case_ : str = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
8
0
'''simple docstring''' from math import pi def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :int ): '''simple docstring''' return 2 * pi * radius * (angle / 3_60) if __name__ == "__main__": print(arc_length(90, 10))
357
'''simple docstring''' import functools def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[str] = len(lowerCamelCase_ ) snake_case_ : Dict = len(lowerCamelCase_ ) @functools.cache def min_distance(lowerCamelCase_ :int , lowerCamelCase_ :int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa snake_case_ : Union[str, Any] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowerCamelCase_ ) , 1 + min_distance(lowerCamelCase_ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
8
0
'''simple docstring''' from collections import Counter from timeit import timeit def UpperCAmelCase ( lowerCamelCase_ :str = "" , ): '''simple docstring''' return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2 def UpperCAmelCase ( lowerCamelCase_ :str = "" ): '''simple docstring''' if len(lowerCamelCase_ ) == 0: return True snake_case_ : List[Any] = input_str.replace(""" """ , """""" ).lower() # character_freq_dict: Stores the frequency of every character in the input string snake_case_ : dict[str, int] = {} for character in lower_case_input_str: snake_case_ : Dict = character_freq_dict.get(lowerCamelCase_ , 0 ) + 1 snake_case_ : List[Any] = 0 for character_count in character_freq_dict.values(): if character_count % 2: odd_char += 1 if odd_char > 1: return False return True def UpperCAmelCase ( lowerCamelCase_ :str = "" ): '''simple docstring''' print("""\nFor string = """ , lowerCamelCase_ , """:""" ) print( """> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(lowerCamelCase_ ) , """\ttime =""" , timeit( """z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , ) print( """> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(lowerCamelCase_ ) , """\ttime =""" , timeit( """z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , ) if __name__ == "__main__": __A : str = input( 'Enter string to determine if it can be rearranged as a palindrome or not: ' ).strip() benchmark(check_str) __A : int = can_string_be_rearranged_as_palindrome_counter(check_str) print(F'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
358
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Any = tmp_path / """file.csv""" snake_case_ : Any = textwrap.dedent( """\ header1,header2 1,2 10,20 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[int] = tmp_path / """malformed_file.csv""" snake_case_ : int = textwrap.dedent( """\ header1,header2 1,2 10,20, """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : str = tmp_path / """csv_with_image.csv""" snake_case_ : int = textwrap.dedent( F'''\ image {image_file} ''' ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' snake_case_ : int = tmp_path / """csv_with_label.csv""" snake_case_ : Tuple = textwrap.dedent( """\ label good bad good """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = tmp_path / """csv_with_int_list.csv""" snake_case_ : str = textwrap.dedent( """\ int_list 1 2 3 4 5 6 7 8 9 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Tuple ): '''simple docstring''' snake_case_ : int = Csv() snake_case_ : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(lowerCamelCase_ , match="""Error tokenizing data""" ): for _ in generator: pass assert any( record.levelname == """ERROR""" and """Failed to read file""" in record.message and os.path.basename(lowerCamelCase_ ) in record.message for record in caplog.records ) @require_pil def UpperCAmelCase ( lowerCamelCase_ :Tuple ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : Tuple = f.read().splitlines()[1] snake_case_ : str = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) ) snake_case_ : Tuple = csv._generate_tables([[csv_file_with_image]] ) snake_case_ : Optional[Any] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""image""" ).type == Image()() snake_case_ : List[str] = pa_table.to_pydict()["""image"""] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : List[Any] = f.read().splitlines()[1:] snake_case_ : Union[str, Any] = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] ) snake_case_ : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )() snake_case_ : Union[str, Any] = pa_table.to_pydict()["""label"""] assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(lowerCamelCase_ ) for label in labels] def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : str = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda lowerCamelCase_ : [int(lowerCamelCase_ ) for i in x.split()]} ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] ) snake_case_ : Tuple = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type ) snake_case_ : Dict = pa_table.to_pydict()["""int_list"""] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
8
0
from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class __UpperCamelCase ( lowercase__ ): lowercase : Union[List[PIL.Image.Image], np.ndarray] lowercase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
359
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=None ): '''simple docstring''' # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match''' snake_case_ : Optional[Any] = nn.Parameter(lowerCamelCase_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match''' snake_case_ : List[str] = nn.Parameter(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : Optional[Any] = np.asarray(weights[0] ) snake_case_ : int = np.asarray(weights[1] ) snake_case_ : Any = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : List[Any] = np.asarray(weights[0] ) snake_case_ : Optional[int] = np.asarray(weights[1] ) snake_case_ : Union[str, Any] = np.asarray(weights[2] ) snake_case_ : int = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' # layernorm 1 snake_case_ : str = weights[0][0][0] snake_case_ : int = np.asarray(layer_norm_a[0] ) snake_case_ : Optional[Any] = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # lsh weights + output snake_case_ : Tuple = weights[0][1] if len(lowerCamelCase_ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) else: set_layer_weights_in_torch_local(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) # intermediate weighs snake_case_ : str = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase_ ) == 4: snake_case_ : List[Any] = intermediate_weights[2] # layernorm 2 snake_case_ : Tuple = np.asarray(intermediate_weights[0][0] ) snake_case_ : Optional[Any] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # intermediate dense snake_case_ : Any = np.asarray(intermediate_weights[1][0] ) snake_case_ : List[Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) # intermediate out snake_case_ : List[Any] = np.asarray(intermediate_weights[4][0] ) snake_case_ : Union[str, Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :Any ): '''simple docstring''' # reformer model snake_case_ : Dict = torch_model.reformer # word embeds snake_case_ : List[Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase_ ) , ) if isinstance(weights[3] , lowerCamelCase_ ): snake_case_ : Tuple = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): snake_case_ : Dict = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'''{position_embeddings[emb_idx]} emb does not match''' snake_case_ : Optional[Any] = nn.Parameter(torch.tensor(lowerCamelCase_ ) ) snake_case_ : List[Any] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): snake_case_ : str = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # output layer norm snake_case_ : Optional[Any] = np.asarray(weights[7][0] ) snake_case_ : List[Any] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # output embeddings snake_case_ : Optional[int] = np.asarray(weights[9][0] ) snake_case_ : Any = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # Initialise PyTorch model snake_case_ : List[str] = ReformerConfig.from_json_file(lowerCamelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case_ : str = ReformerModelWithLMHead(lowerCamelCase_ ) with open(lowerCamelCase_ , """rb""" ) as f: snake_case_ : List[Any] = pickle.load(lowerCamelCase_ )["""weights"""] set_model_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , config.hidden_size ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , lowerCamelCase_ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __A : List[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
8
0
'''simple docstring''' from __future__ import annotations from math import pi, sqrt def UpperCAmelCase ( lowerCamelCase_ :float , lowerCamelCase_ :float ): '''simple docstring''' if inductance <= 0: raise ValueError("""Inductance cannot be 0 or negative""" ) elif capacitance <= 0: raise ValueError("""Capacitance cannot be 0 or negative""" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
360
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A : List[Any] = logging.get_logger(__name__) __A : str = { 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class __UpperCamelCase ( lowercase__ ): lowercase : List[Any] = 'canine' def __init__( self :Optional[int] ,_UpperCamelCase :Dict=7_6_8 ,_UpperCamelCase :Union[str, Any]=1_2 ,_UpperCamelCase :int=1_2 ,_UpperCamelCase :int=3_0_7_2 ,_UpperCamelCase :int="gelu" ,_UpperCamelCase :Any=0.1 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Any=1_6_3_8_4 ,_UpperCamelCase :Tuple=1_6 ,_UpperCamelCase :List[str]=0.02 ,_UpperCamelCase :Any=1E-1_2 ,_UpperCamelCase :Tuple=0 ,_UpperCamelCase :List[str]=0xE_0_0_0 ,_UpperCamelCase :Optional[Any]=0xE_0_0_1 ,_UpperCamelCase :str=4 ,_UpperCamelCase :Optional[int]=4 ,_UpperCamelCase :str=8 ,_UpperCamelCase :int=1_6_3_8_4 ,_UpperCamelCase :int=1_2_8 ,**_UpperCamelCase :str ,): super().__init__(pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : List[str] = max_position_embeddings snake_case_ : Union[str, Any] = hidden_size snake_case_ : Dict = num_hidden_layers snake_case_ : Optional[int] = num_attention_heads snake_case_ : Tuple = intermediate_size snake_case_ : str = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : Optional[Any] = initializer_range snake_case_ : Optional[int] = type_vocab_size snake_case_ : List[str] = layer_norm_eps # Character config: snake_case_ : Any = downsampling_rate snake_case_ : List[str] = upsampling_kernel_size snake_case_ : int = num_hash_functions snake_case_ : Tuple = num_hash_buckets snake_case_ : Tuple = local_transformer_stride
8
0
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __A : Tuple = { 'configuration_informer': [ 'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[int] = [ 'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'InformerForPrediction', 'InformerModel', 'InformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __A : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
361
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __A : Tuple = logging.get_logger(__name__) __A : List[Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : str = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } __A : Optional[Any] = { 'facebook/blenderbot_small-90M': 512, } class __UpperCamelCase ( lowercase__ ): lowercase : str = VOCAB_FILES_NAMES lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict = BlenderbotSmallTokenizer def __init__( self :str ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Tuple="<|endoftext|>" ,_UpperCamelCase :int="<|endoftext|>" ,_UpperCamelCase :Dict="<|endoftext|>" ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :List[Any]=True ,**_UpperCamelCase :Any ,): super().__init__( ByteLevelBPETokenizer( vocab=_UpperCamelCase ,merges=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,trim_offsets=_UpperCamelCase ,) ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Any = add_prefix_space def a__ ( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any]=None ): snake_case_ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : int = [self.sep_token_id] snake_case_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
8
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : List[str] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Optional[Any] = [ 'OPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OPTForCausalLM', 'OPTModel', 'OPTPreTrainedModel', 'OPTForSequenceClassification', 'OPTForQuestionAnswering', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = [ 'FlaxOPTForCausalLM', 'FlaxOPTModel', 'FlaxOPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys __A : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
362
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :list ): '''simple docstring''' if len(lowerCamelCase_ ) <= 1: return lst snake_case_ : Union[str, Any] = 1 while i < len(lowerCamelCase_ ): if lst[i - 1] <= lst[i]: i += 1 else: snake_case_ , snake_case_ : Union[str, Any] = lst[i], lst[i - 1] i -= 1 if i == 0: snake_case_ : int = 1 return lst if __name__ == "__main__": __A : Optional[int] = input('Enter numbers separated by a comma:\n').strip() __A : int = [int(item) for item in user_input.split(',')] print(gnome_sort(unsorted))
8
0
'''simple docstring''' import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class __UpperCamelCase ( lowercase__ ): def a__ ( self :str ): snake_case_ : Any = tempfile.mkdtemp() snake_case_ : Tuple = 8 # DPR tok snake_case_ : Optional[int] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] snake_case_ : List[str] = os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) os.makedirs(_UpperCamelCase ,exist_ok=_UpperCamelCase ) snake_case_ : List[Any] = os.path.join(_UpperCamelCase ,DPR_VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) # BART tok snake_case_ : Dict = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] snake_case_ : Tuple = dict(zip(_UpperCamelCase ,range(len(_UpperCamelCase ) ) ) ) snake_case_ : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] snake_case_ : Union[str, Any] = {"""unk_token""": """<unk>"""} snake_case_ : List[Any] = os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) os.makedirs(_UpperCamelCase ,exist_ok=_UpperCamelCase ) snake_case_ : List[str] = os.path.join(_UpperCamelCase ,BART_VOCAB_FILES_NAMES["""vocab_file"""] ) snake_case_ : Tuple = os.path.join(_UpperCamelCase ,BART_VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write(json.dumps(_UpperCamelCase ) + """\n""" ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(_UpperCamelCase ) ) def a__ ( self :Tuple ): return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""dpr_tokenizer""" ) ) def a__ ( self :Dict ): return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname ,"""bart_tokenizer""" ) ) def a__ ( self :Optional[Any] ): shutil.rmtree(self.tmpdirname ) @require_tokenizers def a__ ( self :str ): snake_case_ : Tuple = os.path.join(self.tmpdirname ,"""rag_tokenizer""" ) snake_case_ : Optional[int] = RagConfig(question_encoder=DPRConfig().to_dict() ,generator=BartConfig().to_dict() ) snake_case_ : List[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() ,generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(_UpperCamelCase ) rag_tokenizer.save_pretrained(_UpperCamelCase ) snake_case_ : Union[str, Any] = RagTokenizer.from_pretrained(_UpperCamelCase ,config=_UpperCamelCase ) self.assertIsInstance(new_rag_tokenizer.question_encoder ,_UpperCamelCase ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() ,rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator ,_UpperCamelCase ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() ,rag_tokenizer.generator.get_vocab() ) @slow def a__ ( self :Any ): snake_case_ : List[Any] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" ) snake_case_ : Union[str, Any] = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] snake_case_ : Tuple = tokenizer(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) @slow def a__ ( self :Dict ): snake_case_ : Union[str, Any] = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" ) snake_case_ : Optional[Any] = [ """who got the first nobel prize in physics""", """when is the next deadpool movie being released""", """which mode is used for short wave broadcast service""", """who is the owner of reading football club""", """when is the next scandal episode coming out""", """when is the last time the philadelphia won the superbowl""", """what is the most current adobe flash player version""", """how many episodes are there in dragon ball z""", """what is the first step in the evolution of the eye""", """where is gall bladder situated in human body""", """what is the main mineral in lithium batteries""", """who is the president of usa right now""", """where do the greasers live in the outsiders""", """panda is a national animal of which country""", """what is the name of manchester united stadium""", ] snake_case_ : str = tokenizer(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase )
363
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int]=1_2 ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Union[str, Any]=3_2 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :Tuple=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Any=0 ,_UpperCamelCase :str=None ,): snake_case_ : str = parent snake_case_ : int = batch_size snake_case_ : Union[str, Any] = seq_length snake_case_ : List[Any] = is_training snake_case_ : Union[str, Any] = use_input_mask snake_case_ : List[str] = use_labels snake_case_ : int = vocab_size snake_case_ : Any = hidden_size snake_case_ : List[Any] = projection_dim snake_case_ : Dict = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : int = dropout snake_case_ : int = attention_dropout snake_case_ : Dict = max_position_embeddings snake_case_ : Union[str, Any] = initializer_range snake_case_ : Dict = scope snake_case_ : Union[str, Any] = bos_token_id def a__ ( self :Any ): snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case_ : Union[str, Any] = None if self.use_input_mask: snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: snake_case_ : int = input_mask.numpy() snake_case_ , snake_case_ : Tuple = input_mask.shape snake_case_ : Any = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCamelCase ): snake_case_ : Optional[int] = 1 snake_case_ : List[str] = 0 snake_case_ : Tuple = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCamelCase ) def a__ ( self :str ): return BlipTextConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,) def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ): snake_case_ : List[str] = TFBlipTextModel(config=_UpperCamelCase ) snake_case_ : List[Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,training=_UpperCamelCase ) snake_case_ : Any = model(_UpperCamelCase ,training=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def a__ ( self :List[str] ): snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : str = config_and_inputs snake_case_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else () lowercase : int = False lowercase : List[Any] = False lowercase : Dict = False def a__ ( self :List[Any] ): snake_case_ : List[str] = BlipTextModelTester(self ) snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 ) def a__ ( self :Union[str, Any] ): self.config_tester.run_common_tests() def a__ ( self :Union[str, Any] ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :Tuple ): pass def a__ ( self :Tuple ): pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def a__ ( self :Any ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :Tuple ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :List[Any] ): pass @slow def a__ ( self :Any ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[Any] = TFBlipTextModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def a__ ( self :Dict ,_UpperCamelCase :Tuple=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
8
0
'''simple docstring''' from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class __UpperCamelCase ( lowercase__ ): lowercase : Tuple = 'philschmid/bart-large-cnn-samsum' lowercase : Tuple = ( 'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ' 'and returns a summary of the text.' ) lowercase : Union[str, Any] = 'summarizer' lowercase : List[str] = AutoTokenizer lowercase : List[Any] = AutoModelForSeqaSeqLM lowercase : Union[str, Any] = ['text'] lowercase : Tuple = ['text'] def a__ ( self :str ,_UpperCamelCase :Optional[int] ): return self.pre_processor(_UpperCamelCase ,return_tensors="""pt""" ,truncation=_UpperCamelCase ) def a__ ( self :Optional[Any] ,_UpperCamelCase :str ): return self.model.generate(**_UpperCamelCase )[0] def a__ ( self :str ,_UpperCamelCase :str ): return self.pre_processor.decode(_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
364
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : int = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
8
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_albert import AlbertTokenizer else: __A : Any = None __A : List[Any] = logging.get_logger(__name__) __A : Any = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __A : List[Any] = { 'vocab_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model', }, 'tokenizer_file': { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json', }, } __A : Optional[int] = { 'albert-base-v1': 512, 'albert-large-v1': 512, 'albert-xlarge-v1': 512, 'albert-xxlarge-v1': 512, 'albert-base-v2': 512, 'albert-large-v2': 512, 'albert-xlarge-v2': 512, 'albert-xxlarge-v2': 512, } __A : List[str] = '▁' class __UpperCamelCase ( lowercase__ ): lowercase : Tuple = VOCAB_FILES_NAMES lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Optional[Any] = AlbertTokenizer def __init__( self :Optional[int] ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Dict=None ,_UpperCamelCase :List[str]=True ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :int=False ,_UpperCamelCase :int="[CLS]" ,_UpperCamelCase :Optional[Any]="[SEP]" ,_UpperCamelCase :str="<unk>" ,_UpperCamelCase :int="[SEP]" ,_UpperCamelCase :Optional[int]="<pad>" ,_UpperCamelCase :Any="[CLS]" ,_UpperCamelCase :Union[str, Any]="[MASK]" ,**_UpperCamelCase :List[Any] ,): # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. snake_case_ : List[str] = ( AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ,normalized=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else mask_token ) super().__init__( _UpperCamelCase ,tokenizer_file=_UpperCamelCase ,do_lower_case=_UpperCamelCase ,remove_space=_UpperCamelCase ,keep_accents=_UpperCamelCase ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,sep_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,cls_token=_UpperCamelCase ,mask_token=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Tuple = do_lower_case snake_case_ : str = remove_space snake_case_ : Dict = keep_accents snake_case_ : Optional[int] = vocab_file snake_case_ : Union[str, Any] = False if not self.vocab_file else True def a__ ( self :Dict ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : Optional[Any] = [self.sep_token_id] snake_case_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self :Union[str, Any] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : Optional[Any] = [self.sep_token_id] snake_case_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self :Tuple ,_UpperCamelCase :str ,_UpperCamelCase :Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ : Dict = os.path.join( _UpperCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ): copyfile(self.vocab_file ,_UpperCamelCase ) return (out_vocab_file,)
365
'''simple docstring''' import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __A : Optional[int] = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): def __init__( self :List[str] ,*_UpperCamelCase :str ,**_UpperCamelCase :Optional[int] ): warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" ,_UpperCamelCase ,) super().__init__(*_UpperCamelCase ,**_UpperCamelCase )
8
0
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int ): '''simple docstring''' if exponent == 1: return base if exponent % 2 == 0: snake_case_ : str = _modexpt(lowerCamelCase_ , exponent // 2 , lowerCamelCase_ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(lowerCamelCase_ , exponent - 1 , lowerCamelCase_ )) % modulo_value def UpperCAmelCase ( lowerCamelCase_ :int = 17_77 , lowerCamelCase_ :int = 18_55 , lowerCamelCase_ :int = 8 ): '''simple docstring''' snake_case_ : str = base for _ in range(1 , lowerCamelCase_ ): snake_case_ : List[str] = _modexpt(lowerCamelCase_ , lowerCamelCase_ , 10**digits ) return result if __name__ == "__main__": print(F'{solution() = }')
366
'''simple docstring''' import re def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[Any] = re.compile( R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" ) return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) ) if __name__ == "__main__": __A : int = '0094702343221' print(is_sri_lankan_phone_number(phone))
8
0
'''simple docstring''' import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def UpperCAmelCase ( lowerCamelCase_ :List[Any] ): # picklable for multiprocessing '''simple docstring''' return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def UpperCAmelCase ( ): '''simple docstring''' with parallel_backend("""spark""" ): assert ParallelBackendConfig.backend_name == "spark" snake_case_ : Optional[int] = [1, 2, 3] with pytest.raises(lowerCamelCase_ ): with parallel_backend("""unsupported backend""" ): map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=2 ) with pytest.raises(lowerCamelCase_ ): with parallel_backend("""unsupported backend""" ): map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("""num_proc""" , [2, -1] ) def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] ): '''simple docstring''' snake_case_ : Optional[Any] = [1, 2] snake_case_ : str = {"""a""": 1, """b""": 2} snake_case_ : List[str] = {"""a""": [1, 2], """b""": [3, 4]} snake_case_ : Union[str, Any] = {"""a""": {"""1""": 1}, """b""": 2} snake_case_ : Union[str, Any] = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4} snake_case_ : Optional[int] = [2, 3] snake_case_ : Dict = {"""a""": 2, """b""": 3} snake_case_ : Optional[Any] = {"""a""": [2, 3], """b""": [4, 5]} snake_case_ : Dict = {"""a""": {"""1""": 2}, """b""": 3} snake_case_ : str = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5} with parallel_backend("""spark""" ): assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa assert map_nested(lowerCamelCase_ , lowerCamelCase_ , num_proc=lowerCamelCase_ ) == expected_map_nested_sa
367
'''simple docstring''' from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class __UpperCamelCase ( lowercase__ ): lowercase : Union[List[PIL.Image.Image], np.ndarray] lowercase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
8
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A : List[str] = logging.get_logger(__name__) __A : Optional[Any] = { 'google/vivit-b-16x2-kinetics400': ( 'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class __UpperCamelCase ( lowercase__ ): lowercase : Any = 'vivit' def __init__( self :Any ,_UpperCamelCase :Optional[Any]=2_2_4 ,_UpperCamelCase :List[Any]=3_2 ,_UpperCamelCase :List[str]=[2, 1_6, 1_6] ,_UpperCamelCase :Tuple=3 ,_UpperCamelCase :Optional[int]=7_6_8 ,_UpperCamelCase :Tuple=1_2 ,_UpperCamelCase :Tuple=1_2 ,_UpperCamelCase :Optional[int]=3_0_7_2 ,_UpperCamelCase :List[Any]="gelu_fast" ,_UpperCamelCase :Optional[Any]=0.0 ,_UpperCamelCase :Union[str, Any]=0.0 ,_UpperCamelCase :str=0.02 ,_UpperCamelCase :Tuple=1E-0_6 ,_UpperCamelCase :Optional[int]=True ,**_UpperCamelCase :Tuple ,): snake_case_ : Dict = hidden_size snake_case_ : List[str] = num_hidden_layers snake_case_ : Any = num_attention_heads snake_case_ : Optional[Any] = intermediate_size snake_case_ : Tuple = hidden_act snake_case_ : str = hidden_dropout_prob snake_case_ : str = attention_probs_dropout_prob snake_case_ : int = initializer_range snake_case_ : Dict = layer_norm_eps snake_case_ : Dict = image_size snake_case_ : Union[str, Any] = num_frames snake_case_ : Union[str, Any] = tubelet_size snake_case_ : Optional[int] = num_channels snake_case_ : Optional[Any] = qkv_bias super().__init__(**_UpperCamelCase )
368
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): lowercase : Dict = StableDiffusionInpaintPipeline lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase : Dict = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase : Optional[int] = frozenset([] ) def a__ ( self :Any ): torch.manual_seed(0 ) snake_case_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,) snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase ) torch.manual_seed(0 ) snake_case_ : List[str] = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,) torch.manual_seed(0 ) snake_case_ : Optional[int] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,) snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase ) snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0] snake_case_ : List[str] = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) ) snake_case_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) ) if str(_UpperCamelCase ).startswith("""mps""" ): snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase ) else: snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) snake_case_ : int = { """prompt""": """A painting of a squirrel eating a burger""", """image""": init_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def a__ ( self :Any ): snake_case_ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ : Optional[Any] = self.get_dummy_components() snake_case_ : Dict = StableDiffusionInpaintPipeline(**_UpperCamelCase ) snake_case_ : List[str] = sd_pipe.to(_UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : Union[str, Any] = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : Tuple = sd_pipe(**_UpperCamelCase ).images snake_case_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) snake_case_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ ( self :Any ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def a__ ( self :List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self :Tuple ): snake_case_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench.npy""" ) snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase ,safety_checker=_UpperCamelCase ) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[str] = torch.manual_seed(0 ) snake_case_ : Dict = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : Union[str, Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9E-3 def a__ ( self :Tuple ): snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" ) snake_case_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : List[str] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,torch_dtype=torch.floataa ,safety_checker=_UpperCamelCase ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[Any] = torch.manual_seed(0 ) snake_case_ : Any = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : List[str] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5E-1 def a__ ( self :Union[str, Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Dict = PNDMScheduler.from_pretrained(_UpperCamelCase ,subfolder="""scheduler""" ) snake_case_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,safety_checker=_UpperCamelCase ,scheduler=_UpperCamelCase ,torch_dtype=torch.floataa ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : Optional[int] = torch.manual_seed(0 ) snake_case_ : Tuple = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=2 ,output_type="""np""" ,) snake_case_ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
8
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig __A : Any = { 'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json', 'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json', 'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json', 'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json', 'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json', 'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json', 'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json', 'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json', } class __UpperCamelCase ( lowercase__ ): lowercase : Optional[Any] = 'albert' def __init__( self :int ,_UpperCamelCase :Tuple=3_0_0_0_0 ,_UpperCamelCase :Optional[int]=1_2_8 ,_UpperCamelCase :Dict=4_0_9_6 ,_UpperCamelCase :Tuple=1_2 ,_UpperCamelCase :List[str]=1 ,_UpperCamelCase :Dict=6_4 ,_UpperCamelCase :List[str]=1_6_3_8_4 ,_UpperCamelCase :Any=1 ,_UpperCamelCase :List[str]="gelu_new" ,_UpperCamelCase :int=0 ,_UpperCamelCase :Dict=0 ,_UpperCamelCase :Dict=5_1_2 ,_UpperCamelCase :Dict=2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Dict=1E-1_2 ,_UpperCamelCase :List[str]=0.1 ,_UpperCamelCase :str="absolute" ,_UpperCamelCase :Optional[int]=0 ,_UpperCamelCase :List[str]=2 ,_UpperCamelCase :str=3 ,**_UpperCamelCase :Dict ,): super().__init__(pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Union[str, Any] = vocab_size snake_case_ : Optional[Any] = embedding_size snake_case_ : int = hidden_size snake_case_ : Tuple = num_hidden_layers snake_case_ : Optional[int] = num_hidden_groups snake_case_ : Dict = num_attention_heads snake_case_ : Any = inner_group_num snake_case_ : Union[str, Any] = hidden_act snake_case_ : Any = intermediate_size snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : List[str] = attention_probs_dropout_prob snake_case_ : List[Any] = max_position_embeddings snake_case_ : Union[str, Any] = type_vocab_size snake_case_ : Any = initializer_range snake_case_ : Dict = layer_norm_eps snake_case_ : Optional[int] = classifier_dropout_prob snake_case_ : Optional[int] = position_embedding_type class __UpperCamelCase ( lowercase__ ): @property def a__ ( self :List[Any] ): if self.task == "multiple-choice": snake_case_ : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: snake_case_ : Union[str, Any] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
369
'''simple docstring''' import collections import os import re from pathlib import Path __A : Dict = 'src/transformers' # Matches is_xxx_available() __A : Dict = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __A : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __A : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __A : Optional[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __A : Optional[int] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __A : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __A : Union[str, Any] = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __A : int = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __A : int = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __A : List[Any] = re.compile(r'^\s*try:') # Catches a line with else: __A : Any = re.compile(r'^\s*else:') def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' if _re_test_backend.search(lowerCamelCase_ ) is None: return None snake_case_ : Tuple = [b[0] for b in _re_backend.findall(lowerCamelCase_ )] backends.sort() return "_and_".join(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ): '''simple docstring''' with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ : str = f.readlines() snake_case_ : List[Any] = 0 while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowerCamelCase_ ): return None # First grab the objects without a specific backend in _import_structure snake_case_ : Union[str, Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: snake_case_ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowerCamelCase_ ): snake_case_ : Optional[int] = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0] snake_case_ : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" , lowerCamelCase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue snake_case_ : Any = _re_import_struct_key_value.search(lowerCamelCase_ ) if single_line_import_search is not None: snake_case_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 snake_case_ : Union[str, Any] = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. snake_case_ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): snake_case_ : List[Any] = lines[line_index] if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None: objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] ) elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None: snake_case_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_between_brackets.search(lowerCamelCase_ ) is not None: snake_case_ : List[str] = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_quote_object.search(lowerCamelCase_ ) is not None: objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case_ : List[Any] = [] while ( line_index < len(lowerCamelCase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): snake_case_ : Union[str, Any] = lines[line_index] snake_case_ : Union[str, Any] = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case_ : Dict = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(lowerCamelCase_ ): # If the line is an if is_backend_available, we grab all objects associated. snake_case_ : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : str = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): snake_case_ : Dict = lines[line_index] snake_case_ : Any = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[str] ): '''simple docstring''' def find_duplicates(lowerCamelCase_ :Union[str, Any] ): return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case_ : Optional[int] = [] for key in import_dict_objects.keys(): snake_case_ : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) snake_case_ : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case_ : str = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Tuple = [] for root, _, files in os.walk(lowerCamelCase_ ): if "__init__.py" in files: snake_case_ : Any = os.path.join(lowerCamelCase_ , """__init__.py""" ) snake_case_ : Dict = parse_init(lowerCamelCase_ ) if objects is not None: snake_case_ : Any = analyze_results(*lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: snake_case_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(lowerCamelCase_ ) ) if len(lowerCamelCase_ ) > 0: raise ValueError("""\n\n""".join(lowerCamelCase_ ) ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Union[str, Any] = [] for path, directories, files in os.walk(lowerCamelCase_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(lowerCamelCase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowerCamelCase_ ) / folder).glob("""*.py""" ) ) ) == 0: continue snake_case_ : Tuple = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(os.path.sep , """.""" ) submodules.append(lowerCamelCase_ ) for fname in files: if fname == "__init__.py": continue snake_case_ : Dict = str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(lowerCamelCase_ ) return submodules __A : List[Any] = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def UpperCAmelCase ( ): '''simple docstring''' # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import snake_case_ : Union[str, Any] = direct_transformers_import(lowerCamelCase_ ) snake_case_ : List[str] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(lowerCamelCase_ , """__init__.py""" ) , """r""" ) as f: snake_case_ : str = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase_ ) ) ) snake_case_ : Dict = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowerCamelCase_ ) > 0: snake_case_ : str = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
8
0
'''simple docstring''' from __future__ import annotations __A : Tuple = list[list[int]] # assigning initial values to the grid __A : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution __A : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def UpperCAmelCase ( lowerCamelCase_ :Matrix , lowerCamelCase_ :int , lowerCamelCase_ :int , lowerCamelCase_ :int ): '''simple docstring''' for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def UpperCAmelCase ( lowerCamelCase_ :Matrix ): '''simple docstring''' for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def UpperCAmelCase ( lowerCamelCase_ :Matrix ): '''simple docstring''' if location := find_empty_location(lowerCamelCase_ ): snake_case_ : str = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): snake_case_ : str = digit if sudoku(lowerCamelCase_ ) is not None: return grid snake_case_ : Any = 0 return None def UpperCAmelCase ( lowerCamelCase_ :Matrix ): '''simple docstring''' for row in grid: for cell in row: print(lowerCamelCase_ , end=""" """ ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('\nExample grid:\n' + '=' * 20) print_solution(example_grid) print('\nExample grid solution:') __A : Tuple = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('Cannot find a solution.')
370
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self :List[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Union[str, Any]=3 ,_UpperCamelCase :Any=1_8 ,_UpperCamelCase :Optional[Any]=3_0 ,_UpperCamelCase :List[str]=4_0_0 ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :List[Any]=True ,): snake_case_ : List[str] = size if size is not None else {"""height""": 1_8, """width""": 1_8} snake_case_ : Union[str, Any] = parent snake_case_ : str = batch_size snake_case_ : List[Any] = num_channels snake_case_ : Tuple = image_size snake_case_ : int = min_resolution snake_case_ : int = max_resolution snake_case_ : Union[str, Any] = do_resize snake_case_ : Optional[Any] = size snake_case_ : Any = apply_ocr def a__ ( self :Union[str, Any] ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Tuple = LayoutLMvaImageProcessor if is_pytesseract_available() else None def a__ ( self :List[Any] ): snake_case_ : Union[str, Any] = LayoutLMvaImageProcessingTester(self ) @property def a__ ( self :int ): return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self :Any ): snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""size""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""apply_ocr""" ) ) def a__ ( self :int ): snake_case_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 1_8, """width""": 1_8} ) snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ) self.assertEqual(image_processor.size ,{"""height""": 4_2, """width""": 4_2} ) def a__ ( self :Optional[Any] ): pass def a__ ( self :Union[str, Any] ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,Image.Image ) # Test not batched input snake_case_ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) self.assertIsInstance(encoding.words ,_UpperCamelCase ) self.assertIsInstance(encoding.boxes ,_UpperCamelCase ) # Test batched snake_case_ : List[Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Tuple ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,numpify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,np.ndarray ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Any = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Optional[Any] ): # Initialize image_processing snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,torchify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,torch.Tensor ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Union[str, Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :List[Any] ): # with apply_OCR = True snake_case_ : Any = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case_ : List[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" ) snake_case_ : str = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) snake_case_ : Dict = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 snake_case_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words ,_UpperCamelCase ) self.assertListEqual(encoding.boxes ,_UpperCamelCase ) # with apply_OCR = False snake_case_ : Dict = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase ) snake_case_ : Optional[int] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
8
0
'''simple docstring''' import pytest from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs @pytest.mark.parametrize( """kwargs, expected""" , [ ({"""num_shards""": 0, """max_num_jobs""": 1}, []), ({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]), ({"""num_shards""": 10, """max_num_jobs""": 10}, [range(lowerCamelCase_ , i + 1 ) for i in range(10 )]), ({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]), ({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]), ({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]), ] , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :List[Any] ): '''simple docstring''' snake_case_ : List[Any] = _distribute_shards(**lowerCamelCase_ ) assert out == expected @pytest.mark.parametrize( """gen_kwargs, max_num_jobs, expected""" , [ ({"""foo""": 0}, 10, [{"""foo""": 0}]), ({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]), ({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]), ({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]), ({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]), ] , ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :Any , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : Tuple = _split_gen_kwargs(lowerCamelCase_ , lowerCamelCase_ ) assert out == expected @pytest.mark.parametrize( """gen_kwargs, expected""" , [ ({"""foo""": 0}, 1), ({"""shards""": [0]}, 1), ({"""shards""": [0, 1, 2, 3]}, 4), ({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4), ({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4), ({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError), ] , ) def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :List[Any] ): '''simple docstring''' if expected is RuntimeError: with pytest.raises(lowerCamelCase_ ): _number_of_shards_in_gen_kwargs(lowerCamelCase_ ) else: snake_case_ : Dict = _number_of_shards_in_gen_kwargs(lowerCamelCase_ ) assert out == expected
371
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : List[Any] = generate_pascal_triangle(lowerCamelCase_ ) for row_idx in range(lowerCamelCase_ ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=""" """ ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=""" """ ) else: print(triangle[row_idx][col_idx] , end="""""" ) print() def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [] for current_row_idx in range(lowerCamelCase_ ): snake_case_ : List[str] = populate_current_row(lowerCamelCase_ , lowerCamelCase_ ) triangle.append(lowerCamelCase_ ) return triangle def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : Union[str, Any] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 snake_case_ , snake_case_ : Optional[Any] = 1, 1 for current_col_idx in range(1 , lowerCamelCase_ ): calculate_current_element( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) return current_row def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :list[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ): '''simple docstring''' snake_case_ : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx - 1] snake_case_ : List[Any] = triangle[current_row_idx - 1][current_col_idx] snake_case_ : Optional[int] = above_to_left_elt + above_to_right_elt def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [[1]] for row_index in range(1 , lowerCamelCase_ ): snake_case_ : Optional[Any] = [0] + result[-1] + [0] snake_case_ : Dict = row_index + 1 # Calculate the number of distinct elements in a row snake_case_ : Any = sum(divmod(lowerCamelCase_ , 2 ) ) snake_case_ : Tuple = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] snake_case_ : Optional[int] = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() snake_case_ : str = row_first_half + row_second_half result.append(lowerCamelCase_ ) return result def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowerCamelCase_ :Callable , lowerCamelCase_ :int ) -> None: snake_case_ : Dict = F'''{func.__name__}({value})''' snake_case_ : Dict = timeit(F'''__main__.{call}''' , setup="""import __main__""" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'''{call:38} -- {timing:.4f} seconds''' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(lowerCamelCase_ , lowerCamelCase_ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
8
0
'''simple docstring''' import math def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Any ): '''simple docstring''' if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(lowerCamelCase_ ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError("""This should never happen""" ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. __A : Optional[Any] = 'Enter the base and the power separated by a comma: ' __A : int = map(int, input(prompt).split(',')) __A : str = map(int, input(prompt).split(',')) # We find the log of each number, using the function res(), which takes two # arguments. __A : int = res(xa, ya) __A : str = res(xa, ya) # We check for the largest number if resa > resa: print('Largest number is', xa, '^', ya) elif resa > resa: print('Largest number is', xa, '^', ya) else: print('Both are equal')
350
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def a__ ( self :Dict ): snake_case_ : Optional[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" ) snake_case_ : Optional[int] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : Tuple = torch.Size((1, 1_2, 7_6_8) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Dict = torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : Tuple = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) ) @slow def a__ ( self :Union[str, Any] ): snake_case_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" ) snake_case_ : Dict = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] ) # The dog is cute and lives in the garden house snake_case_ : List[Any] = torch.Size((1, 1_2, 1_0_2_4) ) # batch_size, sequence_length, embedding_vector_dim snake_case_ : Any = torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): snake_case_ : str = model(_UpperCamelCase )["""last_hidden_state"""].detach() self.assertEqual(output.shape ,_UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )
8
0
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if length <= 0 or not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise ValueError("""Length must be a positive integer.""" ) return [n * (2 * n - 1) for n in range(lowerCamelCase_ )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
351
'''simple docstring''' from __future__ import annotations from collections.abc import Callable def UpperCAmelCase ( lowerCamelCase_ :Callable[[int | float], int | float] , lowerCamelCase_ :int | float , lowerCamelCase_ :int | float , lowerCamelCase_ :int = 1_00 , ): '''simple docstring''' snake_case_ : Tuple = x_start snake_case_ : Optional[int] = fnc(lowerCamelCase_ ) snake_case_ : Optional[int] = 0.0 for _ in range(lowerCamelCase_ ): # Approximates small segments of curve as linear and solve # for trapezoidal area snake_case_ : int = (x_end - x_start) / steps + xa snake_case_ : Union[str, Any] = fnc(lowerCamelCase_ ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step snake_case_ : Any = xa snake_case_ : str = fxa return area if __name__ == "__main__": def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' return x**3 + x**2 print('f(x) = x^3 + x^2') print('The area between the curve, x = -5, x = 5 and the x axis is:') __A : List[str] = 10 while i <= 100_000: print(F'with {i} steps: {trapezoidal_area(f, -5, 5, i)}') i *= 10
8
0
'''simple docstring''' from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class __UpperCamelCase ( lowercase__ ): lowercase : Any = 'dandelin/vilt-b32-finetuned-vqa' lowercase : Tuple = ( 'This is a tool that answers a question about an image. It takes an input named `image` which should be the ' 'image containing the information, as well as a `question` which should be the question in English. It ' 'returns a text that is the answer to the question.' ) lowercase : Optional[Any] = 'image_qa' lowercase : int = AutoProcessor lowercase : Tuple = AutoModelForVisualQuestionAnswering lowercase : Any = ['image', 'text'] lowercase : Optional[Any] = ['text'] def __init__( self :str ,*_UpperCamelCase :int ,**_UpperCamelCase :List[Any] ): requires_backends(self ,["""vision"""] ) super().__init__(*_UpperCamelCase ,**_UpperCamelCase ) def a__ ( self :int ,_UpperCamelCase :"Image" ,_UpperCamelCase :str ): return self.pre_processor(_UpperCamelCase ,_UpperCamelCase ,return_tensors="""pt""" ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ): with torch.no_grad(): return self.model(**_UpperCamelCase ).logits def a__ ( self :str ,_UpperCamelCase :Union[str, Any] ): snake_case_ : Tuple = outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
352
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) __A : int = logging.getLogger() def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[Any] = argparse.ArgumentParser() parser.add_argument("""-f""" ) snake_case_ : int = parser.parse_args() return args.f def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[Any] = {} snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """all_results.json""" ) if os.path.exists(lowerCamelCase_ ): with open(lowerCamelCase_ , """r""" ) as f: snake_case_ : str = json.load(lowerCamelCase_ ) else: raise ValueError(F'''can\'t find {path}''' ) return results def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[str] = torch.cuda.is_available() and torch_device == """cuda""" return is_using_cuda and is_apex_available() __A : Any = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __UpperCamelCase ( lowercase__ ): @classmethod def a__ ( cls :Dict ): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU snake_case_ : Optional[int] = tempfile.mkdtemp() snake_case_ : Any = os.path.join(cls.tmpdir ,"""default_config.yml""" ) write_basic_config(save_location=cls.configPath ) snake_case_ : List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def a__ ( cls :int ): shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Optional[int] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : Dict = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""glue_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,1_0_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""clm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Tuple ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[str] = F''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertLess(result["""perplexity"""] ,4_2 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""mlm_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu snake_case_ : Dict = 7 if get_gpu_count() > 1 else 2 snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : str = F''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Optional[int] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.75 ) self.assertLess(result["""train_loss"""] ,0.5 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""ner_no_trainer""" ) ) ) @unittest.skip(reason="""Fix me @muellerzr""" ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[str] ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : Optional[int] = F''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result["""eval_f1"""] ,2_8 ) self.assertGreaterEqual(result["""eval_exact"""] ,2_8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""qa_no_trainer""" ) ) ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :List[Any] ): snake_case_ : str = self.get_auto_remove_tmp_dir() snake_case_ : Union[str, Any] = F''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Union[str, Any] = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_accuracy"""] ,0.8 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""swag_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : List[Any] = self.get_auto_remove_tmp_dir() snake_case_ : List[Any] = F''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : int = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_rouge1"""] ,1_0 ) self.assertGreaterEqual(result["""eval_rouge2"""] ,2 ) self.assertGreaterEqual(result["""eval_rougeL"""] ,7 ) self.assertGreaterEqual(result["""eval_rougeLsum"""] ,7 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""summarization_no_trainer""" ) ) ) @slow @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :int ): snake_case_ : Tuple = self.get_auto_remove_tmp_dir() snake_case_ : Optional[Any] = F''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) snake_case_ : Any = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_bleu"""] ,3_0 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""epoch_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""translation_no_trainer""" ) ) ) @slow def a__ ( self :Optional[Any] ): snake_case_ : List[str] = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCamelCase ) snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) self.assertGreaterEqual(result["""eval_overall_accuracy"""] ,0.10 ) @mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} ) def a__ ( self :Any ): snake_case_ : Dict = self.get_auto_remove_tmp_dir() snake_case_ : Tuple = F''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append("""--fp16""" ) run_command(self._launch_args + testargs ) snake_case_ : str = get_results(_UpperCamelCase ) # The base model scores a 25% self.assertGreaterEqual(result["""eval_accuracy"""] ,0.6 ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""step_1""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_UpperCamelCase ,"""image_classification_no_trainer""" ) ) )
8
0
'''simple docstring''' from __future__ import annotations def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' return [ord(lowerCamelCase_ ) - 96 for elem in plain] def UpperCAmelCase ( lowerCamelCase_ :list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Optional[Any] = encode(input("""-> """ ).strip().lower() ) print("""Encoded: """ , lowerCamelCase_ ) print("""Decoded:""" , decode(lowerCamelCase_ ) ) if __name__ == "__main__": main()
353
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __A : Tuple = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): lowercase : str = ['input_values', 'padding_mask'] def __init__( self :Optional[int] ,_UpperCamelCase :int = 1 ,_UpperCamelCase :int = 2_4_0_0_0 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :float = None ,_UpperCamelCase :float = None ,**_UpperCamelCase :List[Any] ,): super().__init__(feature_size=_UpperCamelCase ,sampling_rate=_UpperCamelCase ,padding_value=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Dict = chunk_length_s snake_case_ : str = overlap @property def a__ ( self :Any ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def a__ ( self :List[str] ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self :Optional[Any] ,_UpperCamelCase :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCamelCase :Optional[Union[bool, str, PaddingStrategy]] = None ,_UpperCamelCase :Optional[bool] = False ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :Optional[Union[str, TensorType]] = None ,_UpperCamelCase :Optional[int] = None ,): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if padding and truncation: raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" ) elif padding is None: # by default let's pad the inputs snake_case_ : Tuple = True snake_case_ : str = bool( isinstance(_UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: snake_case_ : Any = [np.asarray(_UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(_UpperCamelCase ,np.ndarray ): snake_case_ : Optional[int] = np.asarray(_UpperCamelCase ,dtype=np.floataa ) elif isinstance(_UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): snake_case_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: snake_case_ : Optional[Any] = [np.asarray(_UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(_UpperCamelCase ): if example.ndim > 2: raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' ) snake_case_ : Tuple = None snake_case_ : Optional[Any] = BatchFeature({"""input_values""": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: snake_case_ : Union[str, Any] = min(array.shape[0] for array in raw_audio ) snake_case_ : Dict = int(np.floor(max_length / self.chunk_stride ) ) snake_case_ : Union[str, Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: snake_case_ : Any = max(array.shape[0] for array in raw_audio ) snake_case_ : List[Any] = int(np.ceil(max_length / self.chunk_stride ) ) snake_case_ : Any = (nb_step - 1) * self.chunk_stride + self.chunk_length snake_case_ : Union[str, Any] = """max_length""" else: snake_case_ : int = input_values # normal padding on batch if padded_inputs is None: snake_case_ : Optional[int] = self.pad( _UpperCamelCase ,max_length=_UpperCamelCase ,truncation=_UpperCamelCase ,padding=_UpperCamelCase ,return_attention_mask=_UpperCamelCase ,) if padding: snake_case_ : Tuple = padded_inputs.pop("""attention_mask""" ) snake_case_ : Optional[int] = [] for example in padded_inputs.pop("""input_values""" ): if self.feature_size == 1: snake_case_ : Dict = example[..., None] input_values.append(example.T ) snake_case_ : List[Any] = input_values if return_tensors is not None: snake_case_ : Tuple = padded_inputs.convert_to_tensors(_UpperCamelCase ) return padded_inputs
8
0
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :int = 10_00 ): '''simple docstring''' snake_case_ : List[Any] = 1, 1 snake_case_ : Union[str, Any] = [] for i in range(1 , n + 1 ): snake_case_ : int = prev_numerator + 2 * prev_denominator snake_case_ : Any = prev_numerator + prev_denominator if len(str(lowerCamelCase_ ) ) > len(str(lowerCamelCase_ ) ): result.append(lowerCamelCase_ ) snake_case_ : Tuple = numerator snake_case_ : Optional[int] = denominator return len(lowerCamelCase_ ) if __name__ == "__main__": print(F'{solution() = }')
354
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __A : Dict = { 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class __UpperCamelCase ( lowercase__ ): lowercase : Optional[int] = 'ernie_m' lowercase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self :Optional[Any] ,_UpperCamelCase :int = 2_5_0_0_0_2 ,_UpperCamelCase :int = 7_6_8 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 3_0_7_2 ,_UpperCamelCase :str = "gelu" ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :int = 5_1_4 ,_UpperCamelCase :float = 0.02 ,_UpperCamelCase :int = 1 ,_UpperCamelCase :float = 1E-0_5 ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :List[str]=False ,_UpperCamelCase :Optional[int]=0.0 ,**_UpperCamelCase :List[Any] ,): super().__init__(pad_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Optional[int] = vocab_size snake_case_ : Any = hidden_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : Any = intermediate_size snake_case_ : Any = hidden_act snake_case_ : Tuple = hidden_dropout_prob snake_case_ : Union[str, Any] = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : int = initializer_range snake_case_ : Optional[Any] = layer_norm_eps snake_case_ : Union[str, Any] = classifier_dropout snake_case_ : Tuple = is_decoder snake_case_ : int = act_dropout
8
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : int = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
355
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __UpperCamelCase ( nn.Module ): def __init__( self :Any ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int=0.0 ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = True ,_UpperCamelCase :str = "layer_norm" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Any = only_cross_attention snake_case_ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero""" snake_case_ : Any = (num_embeds_ada_norm is not None) and norm_type == """ada_norm""" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: snake_case_ : Dict = AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ : str = AdaLayerNormZero(_UpperCamelCase ,_UpperCamelCase ) else: snake_case_ : List[Any] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_UpperCamelCase ,) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. snake_case_ : str = ( AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,upcast_attention=_UpperCamelCase ,) # is self-attn if encoder_hidden_states is none else: snake_case_ : Any = None snake_case_ : Optional[Any] = None # 3. Feed-forward snake_case_ : List[str] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : Union[str, Any] = FeedForward(_UpperCamelCase ,dropout=_UpperCamelCase ,activation_fn=_UpperCamelCase ,final_dropout=_UpperCamelCase ) # let chunk size default to None snake_case_ : Optional[int] = None snake_case_ : Dict = 0 def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ): # Sets chunk feed-forward snake_case_ : Optional[Any] = chunk_size snake_case_ : Optional[Any] = dim def a__ ( self :List[str] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,_UpperCamelCase :Dict[str, Any] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,): # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = self.norma( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=hidden_states.dtype ) else: snake_case_ : Optional[int] = self.norma(_UpperCamelCase ) snake_case_ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {} snake_case_ : Union[str, Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_msa.unsqueeze(1 ) * attn_output snake_case_ : Union[str, Any] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: snake_case_ : Any = ( self.norma(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase ) ) snake_case_ : List[Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=_UpperCamelCase ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Tuple = attn_output + hidden_states # 3. Feed-forward snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) snake_case_ : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size snake_case_ : int = torch.cat( [self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,) else: snake_case_ : List[str] = self.ff(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output snake_case_ : Any = ff_output + hidden_states return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Dict ,_UpperCamelCase :int ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :int = 4 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Tuple = int(dim * mult ) snake_case_ : Optional[int] = dim_out if dim_out is not None else dim if activation_fn == "gelu": snake_case_ : Any = GELU(_UpperCamelCase ,_UpperCamelCase ) if activation_fn == "gelu-approximate": snake_case_ : Tuple = GELU(_UpperCamelCase ,_UpperCamelCase ,approximate="""tanh""" ) elif activation_fn == "geglu": snake_case_ : Dict = GEGLU(_UpperCamelCase ,_UpperCamelCase ) elif activation_fn == "geglu-approximate": snake_case_ : Optional[Any] = ApproximateGELU(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Dict = nn.ModuleList([] ) # project in self.net.append(_UpperCamelCase ) # project dropout self.net.append(nn.Dropout(_UpperCamelCase ) ) # project out self.net.append(nn.Linear(_UpperCamelCase ,_UpperCamelCase ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(_UpperCamelCase ) ) def a__ ( self :Tuple ,_UpperCamelCase :Union[str, Any] ): for module in self.net: snake_case_ : Tuple = module(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :str = "none" ): super().__init__() snake_case_ : Union[str, Any] = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Optional[Any] = approximate def a__ ( self :str ,_UpperCamelCase :int ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ,approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ): snake_case_ : Optional[Any] = self.proj(_UpperCamelCase ) snake_case_ : int = self.gelu(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : str = nn.Linear(_UpperCamelCase ,dim_out * 2 ) def a__ ( self :Dict ,_UpperCamelCase :List[str] ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def a__ ( self :Optional[Any] ,_UpperCamelCase :Optional[int] ): snake_case_ , snake_case_ : Dict = self.proj(_UpperCamelCase ).chunk(2 ,dim=-1 ) return hidden_states * self.gelu(_UpperCamelCase ) class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[int] ): snake_case_ : int = self.proj(_UpperCamelCase ) return x * torch.sigmoid(1.7_02 * x ) class __UpperCamelCase ( nn.Module ): def __init__( self :int ,_UpperCamelCase :str ,_UpperCamelCase :List[Any] ): super().__init__() snake_case_ : int = nn.Embedding(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Union[str, Any] = nn.SiLU() snake_case_ : Any = nn.Linear(_UpperCamelCase ,embedding_dim * 2 ) snake_case_ : Dict = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ) ) ) snake_case_ , snake_case_ : Tuple = torch.chunk(_UpperCamelCase ,2 ) snake_case_ : Tuple = self.norm(_UpperCamelCase ) * (1 + scale) + shift return x class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = CombinedTimestepLabelEmbeddings(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : int = nn.SiLU() snake_case_ : List[str] = nn.Linear(_UpperCamelCase ,6 * embedding_dim ,bias=_UpperCamelCase ) snake_case_ : str = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ,eps=1E-6 ) def a__ ( self :Union[str, Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :str=None ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=_UpperCamelCase ) ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : Any = emb.chunk(6 ,dim=1 ) snake_case_ : str = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :Optional[str] = None ,_UpperCamelCase :float = 1E-5 ): super().__init__() snake_case_ : Optional[int] = num_groups snake_case_ : List[Any] = eps if act_fn is None: snake_case_ : int = None else: snake_case_ : Dict = get_activation(_UpperCamelCase ) snake_case_ : Optional[int] = nn.Linear(_UpperCamelCase ,out_dim * 2 ) def a__ ( self :List[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str] ): if self.act: snake_case_ : Any = self.act(_UpperCamelCase ) snake_case_ : Optional[int] = self.linear(_UpperCamelCase ) snake_case_ : Dict = emb[:, :, None, None] snake_case_ , snake_case_ : str = emb.chunk(2 ,dim=1 ) snake_case_ : str = F.group_norm(_UpperCamelCase ,self.num_groups ,eps=self.eps ) snake_case_ : List[str] = x * (1 + scale) + shift return x
8
0
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer __A : str = logging.getLogger(__name__) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : List[Any] = argparse.ArgumentParser( description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" ) parser.add_argument( """--dataset_name""" , type=lowerCamelCase_ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , ) parser.add_argument( """--dataset_config""" , type=lowerCamelCase_ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" ) parser.add_argument( """--tokenizer_name_or_path""" , type=lowerCamelCase_ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , ) parser.add_argument( """--shard_size""" , type=lowerCamelCase_ , default=10_00 , help="""Number of entries to go in a single shard.""" , ) parser.add_argument("""--split""" , type=lowerCamelCase_ , default="""train""" , choices=["""train""", """test""", """validation"""] ) parser.add_argument( """--limit""" , default=lowerCamelCase_ , type=lowerCamelCase_ , help="""Limit the number of shards (used for debugging).""" , ) parser.add_argument( """--max_length""" , type=lowerCamelCase_ , default=5_12 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum""" """ sequence length that is a multiple of 8.""" , ) parser.add_argument( """--output_dir""" , default="""tf-tpu""" , type=lowerCamelCase_ , help="""Output directory where the TFRecord shards will be saved. If the""" """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord""" """ shards will be directly saved to a Google Cloud Storage bucket.""" , ) snake_case_ : Optional[int] = parser.parse_args() return args def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ): '''simple docstring''' def fn(lowerCamelCase_ :Union[str, Any] ): return tokenizer(examples["""text"""] ) return fn def UpperCAmelCase ( lowerCamelCase_ :Dict ): '''simple docstring''' snake_case_ : Optional[Any] = [] for i in range(len(tokenized_data["""input_ids"""] ) ): snake_case_ : List[Any] = { """input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ), """attention_mask""": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ), } snake_case_ : Tuple = tf.train.Features(feature=lowerCamelCase_ ) snake_case_ : Union[str, Any] = tf.train.Example(features=lowerCamelCase_ ) snake_case_ : List[Any] = example.SerializeToString() records.append(lowerCamelCase_ ) return records def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : List[str] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: snake_case_ : List[str] = min(len(lowerCamelCase_ ) , args.limit ) snake_case_ : Optional[Any] = dataset.select(range(lowerCamelCase_ ) ) print(F'''Limiting the dataset to {args.limit} entries.''' ) snake_case_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) snake_case_ : int = os.path.join(args.output_dir , args.split ) if not os.path.exists(lowerCamelCase_ ): os.makedirs(lowerCamelCase_ ) else: snake_case_ : List[str] = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. snake_case_ : List[str] = tokenize_function(lowerCamelCase_ ) snake_case_ : Optional[int] = dataset.map(lowerCamelCase_ , batched=lowerCamelCase_ , num_proc=4 , remove_columns=["""text"""] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowerCamelCase_ :Tuple ): # Concatenate all texts. snake_case_ : Any = {k: sum(examples[k] , [] ) for k in examples.keys()} snake_case_ : Tuple = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 snake_case_ : str = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. snake_case_ : Tuple = { k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase_ , args.max_length )] for k, t in concatenated_examples.items() } return result snake_case_ : List[str] = dataset_tokenized.map(lowerCamelCase_ , batched=lowerCamelCase_ , batch_size=10_00 , num_proc=4 ) snake_case_ : Optional[Any] = 0 snake_case_ : str = 0 for shard in range(0 , len(lowerCamelCase_ ) , args.shard_size ): snake_case_ : List[Any] = grouped_dataset[shard : shard + args.shard_size] snake_case_ : Dict = len(dataset_snapshot["""input_ids"""] ) snake_case_ : int = os.path.join(lowerCamelCase_ , F'''dataset-{shard_count}-{records_containing}.tfrecord''' ) snake_case_ : List[Any] = get_serialized_examples(lowerCamelCase_ ) with tf.io.TFRecordWriter(lowerCamelCase_ ) as out_file: for i in range(len(lowerCamelCase_ ) ): snake_case_ : Dict = serialized_examples[i] out_file.write(lowerCamelCase_ ) print("""Wrote file {} containing {} records""".format(lowerCamelCase_ , lowerCamelCase_ ) ) shard_count += 1 total_records += records_containing with open(F'''split-{args.split}-records-count.txt''' , """w""" ) as f: print(F'''Total {args.split} records: {total_records}''' , file=lowerCamelCase_ ) if __name__ == "__main__": __A : List[Any] = parse_args() main(args)
356
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :str=True , lowerCamelCase_ :str="pt" ): '''simple docstring''' snake_case_ : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} snake_case_ : Union[str, Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , lowerCamelCase_ :Any=None , ): '''simple docstring''' snake_case_ : Dict = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __UpperCamelCase ( lowercase__ ): def __init__( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Any ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any="train" ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :int=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :Optional[int]="" ,): super().__init__() snake_case_ : List[str] = Path(_UpperCamelCase ).joinpath(type_path + """.source""" ) snake_case_ : int = Path(_UpperCamelCase ).joinpath(type_path + """.target""" ) snake_case_ : Optional[int] = self.get_char_lens(self.src_file ) snake_case_ : List[str] = max_source_length snake_case_ : str = max_target_length assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}''' snake_case_ : str = tokenizer snake_case_ : str = prefix if n_obs is not None: snake_case_ : int = self.src_lens[:n_obs] snake_case_ : Tuple = src_lang snake_case_ : str = tgt_lang def __len__( self :Any ): return len(self.src_lens ) def __getitem__( self :List[str] ,_UpperCamelCase :Union[str, Any] ): snake_case_ : Optional[int] = index + 1 # linecache starts at 1 snake_case_ : Dict = self.prefix + linecache.getline(str(self.src_file ) ,_UpperCamelCase ).rstrip("""\n""" ) snake_case_ : List[Any] = linecache.getline(str(self.tgt_file ) ,_UpperCamelCase ).rstrip("""\n""" ) assert source_line, F'''empty source line for index {index}''' assert tgt_line, F'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer ,_UpperCamelCase ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right snake_case_ : int = ( self.tokenizer.question_encoder if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer ) snake_case_ : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer snake_case_ : Optional[Any] = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_source_length ,"""right""" ) snake_case_ : Tuple = encode_line(_UpperCamelCase ,_UpperCamelCase ,self.max_target_length ,"""right""" ) snake_case_ : int = source_inputs["""input_ids"""].squeeze() snake_case_ : str = target_inputs["""input_ids"""].squeeze() snake_case_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def a__ ( _UpperCamelCase :str ): return [len(_UpperCamelCase ) for x in Path(_UpperCamelCase ).open().readlines()] def a__ ( self :Optional[int] ,_UpperCamelCase :List[str] ): snake_case_ : Optional[Any] = torch.stack([x["""input_ids"""] for x in batch] ) snake_case_ : List[Any] = torch.stack([x["""attention_mask"""] for x in batch] ) snake_case_ : Union[str, Any] = torch.stack([x["""decoder_input_ids"""] for x in batch] ) snake_case_ : Optional[Any] = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Tuple = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer ,_UpperCamelCase ) else self.tokenizer.pad_token_id ) snake_case_ : Optional[int] = trim_batch(_UpperCamelCase ,_UpperCamelCase ) snake_case_ , snake_case_ : Dict = trim_batch(_UpperCamelCase ,_UpperCamelCase ,attention_mask=_UpperCamelCase ) snake_case_ : Optional[int] = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __A : List[Any] = getLogger(__name__) def UpperCAmelCase ( lowerCamelCase_ :List[List] ): '''simple docstring''' return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : int = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[int]=4 , **lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] ): '''simple docstring''' with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Optional[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) snake_case_ : List[str] = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def UpperCAmelCase ( lowerCamelCase_ :Callable , lowerCamelCase_ :Iterable ): '''simple docstring''' return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Dict ): '''simple docstring''' def remove_articles(lowerCamelCase_ :str ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ :Optional[Any] ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ :Tuple ): snake_case_ : Union[str, Any] = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ :Optional[Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def UpperCAmelCase ( lowerCamelCase_ :List[Any] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' snake_case_ : List[Any] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : Optional[int] = normalize_answer(lowerCamelCase_ ).split() snake_case_ : List[Any] = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) snake_case_ : Optional[Any] = sum(common.values() ) if num_same == 0: return 0 snake_case_ : Optional[Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Union[str, Any] = 1.0 * num_same / len(lowerCamelCase_ ) snake_case_ : Optional[Any] = (2 * precision * recall) / (precision + recall) return fa def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] ): '''simple docstring''' assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) snake_case_ : Optional[int] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' return model_prefix.startswith("""rag""" ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Any , lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead snake_case_ : Optional[int] = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue snake_case_ : str = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
8
0
'''simple docstring''' from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=lowercase__ ): lowercase : Union[str, Any] = ['flax', 'transformers'] def __init__( self :Dict ,*_UpperCamelCase :Any ,**_UpperCamelCase :str ): requires_backends(self ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :Union[str, Any] ,*_UpperCamelCase :int ,**_UpperCamelCase :int ): requires_backends(cls ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :List[Any] ,*_UpperCamelCase :Union[str, Any] ,**_UpperCamelCase :Any ): requires_backends(cls ,["""flax""", """transformers"""] ) class __UpperCamelCase ( metaclass=lowercase__ ): lowercase : int = ['flax', 'transformers'] def __init__( self :Optional[int] ,*_UpperCamelCase :List[Any] ,**_UpperCamelCase :int ): requires_backends(self ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :Tuple ,*_UpperCamelCase :Tuple ,**_UpperCamelCase :List[str] ): requires_backends(cls ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :int ,*_UpperCamelCase :List[str] ,**_UpperCamelCase :Optional[Any] ): requires_backends(cls ,["""flax""", """transformers"""] ) class __UpperCamelCase ( metaclass=lowercase__ ): lowercase : Union[str, Any] = ['flax', 'transformers'] def __init__( self :Tuple ,*_UpperCamelCase :Optional[int] ,**_UpperCamelCase :Union[str, Any] ): requires_backends(self ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :int ,*_UpperCamelCase :Any ,**_UpperCamelCase :str ): requires_backends(cls ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :Tuple ,*_UpperCamelCase :Union[str, Any] ,**_UpperCamelCase :Any ): requires_backends(cls ,["""flax""", """transformers"""] ) class __UpperCamelCase ( metaclass=lowercase__ ): lowercase : Dict = ['flax', 'transformers'] def __init__( self :List[Any] ,*_UpperCamelCase :Any ,**_UpperCamelCase :Any ): requires_backends(self ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :Any ,*_UpperCamelCase :str ,**_UpperCamelCase :Any ): requires_backends(cls ,["""flax""", """transformers"""] ) @classmethod def a__ ( cls :Optional[Any] ,*_UpperCamelCase :List[Any] ,**_UpperCamelCase :Union[str, Any] ): requires_backends(cls ,["""flax""", """transformers"""] )
357
'''simple docstring''' import functools def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[str] = len(lowerCamelCase_ ) snake_case_ : Dict = len(lowerCamelCase_ ) @functools.cache def min_distance(lowerCamelCase_ :int , lowerCamelCase_ :int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa snake_case_ : Union[str, Any] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , lowerCamelCase_ ) , 1 + min_distance(lowerCamelCase_ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
8
0
'''simple docstring''' from collections.abc import Sequence def UpperCAmelCase ( lowerCamelCase_ :Sequence[float] , lowerCamelCase_ :bool = False ): '''simple docstring''' if not arr: return 0 snake_case_ : Optional[int] = 0 if allow_empty_subarrays else float("""-inf""" ) snake_case_ : Dict = 0.0 for num in arr: snake_case_ : Optional[Any] = max(0 if allow_empty_subarrays else num , curr_sum + num ) snake_case_ : Tuple = max(lowerCamelCase_ , lowerCamelCase_ ) return max_sum if __name__ == "__main__": from doctest import testmod testmod() __A : Dict = [-2, 1, -3, 4, -1, 2, 1, -5, 4] print(F'{max_subarray_sum(nums) = }')
358
'''simple docstring''' import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Any = tmp_path / """file.csv""" snake_case_ : Any = textwrap.dedent( """\ header1,header2 1,2 10,20 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : Optional[int] = tmp_path / """malformed_file.csv""" snake_case_ : int = textwrap.dedent( """\ header1,header2 1,2 10,20, """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : str = tmp_path / """csv_with_image.csv""" snake_case_ : int = textwrap.dedent( F'''\ image {image_file} ''' ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Any ): '''simple docstring''' snake_case_ : int = tmp_path / """csv_with_label.csv""" snake_case_ : Tuple = textwrap.dedent( """\ label good bad good """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) @pytest.fixture def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : List[str] = tmp_path / """csv_with_int_list.csv""" snake_case_ : str = textwrap.dedent( """\ int_list 1 2 3 4 5 6 7 8 9 """ ) with open(lowerCamelCase_ , """w""" ) as f: f.write(lowerCamelCase_ ) return str(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :int , lowerCamelCase_ :Tuple ): '''simple docstring''' snake_case_ : int = Csv() snake_case_ : Optional[Any] = csv._generate_tables([[csv_file, malformed_csv_file]] ) with pytest.raises(lowerCamelCase_ , match="""Error tokenizing data""" ): for _ in generator: pass assert any( record.levelname == """ERROR""" and """Failed to read file""" in record.message and os.path.basename(lowerCamelCase_ ) in record.message for record in caplog.records ) @require_pil def UpperCAmelCase ( lowerCamelCase_ :Tuple ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : Tuple = f.read().splitlines()[1] snake_case_ : str = Csv(encoding="""utf-8""" , features=Features({"""image""": Image()} ) ) snake_case_ : Tuple = csv._generate_tables([[csv_file_with_image]] ) snake_case_ : Optional[Any] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""image""" ).type == Image()() snake_case_ : List[str] = pa_table.to_pydict()["""image"""] assert generated_content == [{"path": image_file, "bytes": None}] def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' with open(lowerCamelCase_ , encoding="""utf-8""" ) as f: snake_case_ : List[Any] = f.read().splitlines()[1:] snake_case_ : Union[str, Any] = Csv(encoding="""utf-8""" , features=Features({"""label""": ClassLabel(names=["""good""", """bad"""] )} ) ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_label]] ) snake_case_ : Optional[int] = pa.concat_tables([table for _, table in generator] ) assert pa_table.schema.field("""label""" ).type == ClassLabel(names=["""good""", """bad"""] )() snake_case_ : Union[str, Any] = pa_table.to_pydict()["""label"""] assert generated_content == [ClassLabel(names=["""good""", """bad"""] ).straint(lowerCamelCase_ ) for label in labels] def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] ): '''simple docstring''' snake_case_ : str = Csv(encoding="""utf-8""" , sep=""",""" , converters={"""int_list""": lambda lowerCamelCase_ : [int(lowerCamelCase_ ) for i in x.split()]} ) snake_case_ : Optional[Any] = csv._generate_tables([[csv_file_with_int_list]] ) snake_case_ : Tuple = pa.concat_tables([table for _, table in generator] ) assert pa.types.is_list(pa_table.schema.field("""int_list""" ).type ) snake_case_ : Dict = pa_table.to_pydict()["""int_list"""] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
8
0
def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :int ): '''simple docstring''' return int((input_a, input_a).count(0 ) == 0 ) def UpperCAmelCase ( ): '''simple docstring''' assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
359
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase ( lowerCamelCase_ :Dict , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :Tuple=None ): '''simple docstring''' # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'''{torch_layer} layer.weight does not match''' snake_case_ : Optional[Any] = nn.Parameter(lowerCamelCase_ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'''{torch_layer} layer.bias does not match''' snake_case_ : List[str] = nn.Parameter(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : Optional[Any] = np.asarray(weights[0] ) snake_case_ : int = np.asarray(weights[1] ) snake_case_ : Any = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[Any] ): '''simple docstring''' # set torch weights for 1-to-1 comparison snake_case_ : List[Any] = np.asarray(weights[0] ) snake_case_ : Optional[int] = np.asarray(weights[1] ) snake_case_ : Union[str, Any] = np.asarray(weights[2] ) snake_case_ : int = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(lowerCamelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCamelCase_ ) , ) set_param( torch_layer.output.dense , torch.tensor(lowerCamelCase_ ).view(-1 , lowerCamelCase_ ).contiguous().transpose(0 , 1 ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] ): '''simple docstring''' # layernorm 1 snake_case_ : str = weights[0][0][0] snake_case_ : int = np.asarray(layer_norm_a[0] ) snake_case_ : Optional[Any] = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # lsh weights + output snake_case_ : Tuple = weights[0][1] if len(lowerCamelCase_ ) < 4: set_layer_weights_in_torch_lsh(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) else: set_layer_weights_in_torch_local(lowerCamelCase_ , torch_block.attention , lowerCamelCase_ ) # intermediate weighs snake_case_ : str = weights[2][0][1][2] # Chunked Feed Forward if len(lowerCamelCase_ ) == 4: snake_case_ : List[Any] = intermediate_weights[2] # layernorm 2 snake_case_ : Tuple = np.asarray(intermediate_weights[0][0] ) snake_case_ : Optional[Any] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # intermediate dense snake_case_ : Any = np.asarray(intermediate_weights[1][0] ) snake_case_ : List[Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) # intermediate out snake_case_ : List[Any] = np.asarray(intermediate_weights[4][0] ) snake_case_ : Union[str, Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :Any ): '''simple docstring''' # reformer model snake_case_ : Dict = torch_model.reformer # word embeds snake_case_ : List[Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCamelCase_ ) , ) if isinstance(weights[3] , lowerCamelCase_ ): snake_case_ : Tuple = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): snake_case_ : Dict = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'''{position_embeddings[emb_idx]} emb does not match''' snake_case_ : Optional[Any] = nn.Parameter(torch.tensor(lowerCamelCase_ ) ) snake_case_ : List[Any] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( lowerCamelCase_ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): snake_case_ : str = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # output layer norm snake_case_ : Optional[Any] = np.asarray(weights[7][0] ) snake_case_ : List[Any] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCamelCase_ ) , torch.tensor(lowerCamelCase_ ) , ) # output embeddings snake_case_ : Optional[int] = np.asarray(weights[9][0] ) snake_case_ : Any = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(lowerCamelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCamelCase_ ) , ) def UpperCAmelCase ( lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[Any] ): '''simple docstring''' # Initialise PyTorch model snake_case_ : List[str] = ReformerConfig.from_json_file(lowerCamelCase_ ) print(F'''Building PyTorch model from configuration: {config}''' ) snake_case_ : str = ReformerModelWithLMHead(lowerCamelCase_ ) with open(lowerCamelCase_ , """rb""" ) as f: snake_case_ : List[Any] = pickle.load(lowerCamelCase_ )["""weights"""] set_model_weights_in_torch(lowerCamelCase_ , lowerCamelCase_ , config.hidden_size ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , lowerCamelCase_ ) if __name__ == "__main__": __A : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __A : List[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
8
0
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class __UpperCamelCase ( nn.Module ): def __init__( self :Any ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int=0.0 ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = True ,_UpperCamelCase :str = "layer_norm" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Any = only_cross_attention snake_case_ : Union[str, Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero""" snake_case_ : Any = (num_embeds_ada_norm is not None) and norm_type == """ada_norm""" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: snake_case_ : Dict = AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ : str = AdaLayerNormZero(_UpperCamelCase ,_UpperCamelCase ) else: snake_case_ : List[Any] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if only_cross_attention else None ,upcast_attention=_UpperCamelCase ,) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. snake_case_ : str = ( AdaLayerNorm(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) ) snake_case_ : List[str] = Attention( query_dim=_UpperCamelCase ,cross_attention_dim=cross_attention_dim if not double_self_attention else None ,heads=_UpperCamelCase ,dim_head=_UpperCamelCase ,dropout=_UpperCamelCase ,bias=_UpperCamelCase ,upcast_attention=_UpperCamelCase ,) # is self-attn if encoder_hidden_states is none else: snake_case_ : Any = None snake_case_ : Optional[Any] = None # 3. Feed-forward snake_case_ : List[str] = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) snake_case_ : Union[str, Any] = FeedForward(_UpperCamelCase ,dropout=_UpperCamelCase ,activation_fn=_UpperCamelCase ,final_dropout=_UpperCamelCase ) # let chunk size default to None snake_case_ : Optional[int] = None snake_case_ : Dict = 0 def a__ ( self :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :int ): # Sets chunk feed-forward snake_case_ : Optional[Any] = chunk_size snake_case_ : Optional[Any] = dim def a__ ( self :List[str] ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.FloatTensor] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,_UpperCamelCase :Dict[str, Any] = None ,_UpperCamelCase :Optional[torch.LongTensor] = None ,): # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ,_UpperCamelCase ) elif self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = self.norma( _UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=hidden_states.dtype ) else: snake_case_ : Optional[int] = self.norma(_UpperCamelCase ) snake_case_ : int = cross_attention_kwargs if cross_attention_kwargs is not None else {} snake_case_ : Union[str, Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_msa.unsqueeze(1 ) * attn_output snake_case_ : Union[str, Any] = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: snake_case_ : Any = ( self.norma(_UpperCamelCase ,_UpperCamelCase ) if self.use_ada_layer_norm else self.norma(_UpperCamelCase ) ) snake_case_ : List[Any] = self.attna( _UpperCamelCase ,encoder_hidden_states=_UpperCamelCase ,attention_mask=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Tuple = attn_output + hidden_states # 3. Feed-forward snake_case_ : Optional[Any] = self.norma(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Dict = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) snake_case_ : Union[str, Any] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size snake_case_ : int = torch.cat( [self.ff(_UpperCamelCase ) for hid_slice in norm_hidden_states.chunk(_UpperCamelCase ,dim=self._chunk_dim )] ,dim=self._chunk_dim ,) else: snake_case_ : List[str] = self.ff(_UpperCamelCase ) if self.use_ada_layer_norm_zero: snake_case_ : Union[str, Any] = gate_mlp.unsqueeze(1 ) * ff_output snake_case_ : Any = ff_output + hidden_states return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Dict ,_UpperCamelCase :int ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :int = 4 ,_UpperCamelCase :float = 0.0 ,_UpperCamelCase :str = "geglu" ,_UpperCamelCase :bool = False ,): super().__init__() snake_case_ : Tuple = int(dim * mult ) snake_case_ : Optional[int] = dim_out if dim_out is not None else dim if activation_fn == "gelu": snake_case_ : Any = GELU(_UpperCamelCase ,_UpperCamelCase ) if activation_fn == "gelu-approximate": snake_case_ : Tuple = GELU(_UpperCamelCase ,_UpperCamelCase ,approximate="""tanh""" ) elif activation_fn == "geglu": snake_case_ : Dict = GEGLU(_UpperCamelCase ,_UpperCamelCase ) elif activation_fn == "geglu-approximate": snake_case_ : Optional[Any] = ApproximateGELU(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Dict = nn.ModuleList([] ) # project in self.net.append(_UpperCamelCase ) # project dropout self.net.append(nn.Dropout(_UpperCamelCase ) ) # project out self.net.append(nn.Linear(_UpperCamelCase ,_UpperCamelCase ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(_UpperCamelCase ) ) def a__ ( self :Tuple ,_UpperCamelCase :Union[str, Any] ): for module in self.net: snake_case_ : Tuple = module(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :str = "none" ): super().__init__() snake_case_ : Union[str, Any] = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Optional[Any] = approximate def a__ ( self :str ,_UpperCamelCase :int ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ,approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ,approximate=self.approximate ).to(dtype=gate.dtype ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[Any] ): snake_case_ : Optional[Any] = self.proj(_UpperCamelCase ) snake_case_ : int = self.gelu(_UpperCamelCase ) return hidden_states class __UpperCamelCase ( nn.Module ): def __init__( self :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : str = nn.Linear(_UpperCamelCase ,dim_out * 2 ) def a__ ( self :Dict ,_UpperCamelCase :List[str] ): if gate.device.type != "mps": return F.gelu(_UpperCamelCase ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def a__ ( self :Optional[Any] ,_UpperCamelCase :Optional[int] ): snake_case_ : Dict = self.proj(_UpperCamelCase ).chunk(2 ,dim=-1 ) return hidden_states * self.gelu(_UpperCamelCase ) class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :int ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = nn.Linear(_UpperCamelCase ,_UpperCamelCase ) def a__ ( self :Optional[int] ,_UpperCamelCase :Optional[int] ): snake_case_ : int = self.proj(_UpperCamelCase ) return x * torch.sigmoid(1.7_02 * x ) class __UpperCamelCase ( nn.Module ): def __init__( self :int ,_UpperCamelCase :str ,_UpperCamelCase :List[Any] ): super().__init__() snake_case_ : int = nn.Embedding(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : Union[str, Any] = nn.SiLU() snake_case_ : Any = nn.Linear(_UpperCamelCase ,embedding_dim * 2 ) snake_case_ : Dict = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ) def a__ ( self :int ,_UpperCamelCase :List[str] ,_UpperCamelCase :int ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ) ) ) snake_case_ : Tuple = torch.chunk(_UpperCamelCase ,2 ) snake_case_ : Tuple = self.norm(_UpperCamelCase ) * (1 + scale) + shift return x class __UpperCamelCase ( nn.Module ): def __init__( self :List[str] ,_UpperCamelCase :Tuple ,_UpperCamelCase :int ): super().__init__() snake_case_ : int = CombinedTimestepLabelEmbeddings(_UpperCamelCase ,_UpperCamelCase ) snake_case_ : int = nn.SiLU() snake_case_ : List[str] = nn.Linear(_UpperCamelCase ,6 * embedding_dim ,bias=_UpperCamelCase ) snake_case_ : str = nn.LayerNorm(_UpperCamelCase ,elementwise_affine=_UpperCamelCase ,eps=1E-6 ) def a__ ( self :Union[str, Any] ,_UpperCamelCase :Any ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :str=None ): snake_case_ : Union[str, Any] = self.linear(self.silu(self.emb(_UpperCamelCase ,_UpperCamelCase ,hidden_dtype=_UpperCamelCase ) ) ) snake_case_ : Any = emb.chunk(6 ,dim=1 ) snake_case_ : str = self.norm(_UpperCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class __UpperCamelCase ( nn.Module ): def __init__( self :Optional[int] ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :int ,_UpperCamelCase :Optional[str] = None ,_UpperCamelCase :float = 1E-5 ): super().__init__() snake_case_ : Optional[int] = num_groups snake_case_ : List[Any] = eps if act_fn is None: snake_case_ : int = None else: snake_case_ : Dict = get_activation(_UpperCamelCase ) snake_case_ : Optional[int] = nn.Linear(_UpperCamelCase ,out_dim * 2 ) def a__ ( self :List[Any] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :List[str] ): if self.act: snake_case_ : Any = self.act(_UpperCamelCase ) snake_case_ : Optional[int] = self.linear(_UpperCamelCase ) snake_case_ : Dict = emb[:, :, None, None] snake_case_ : str = emb.chunk(2 ,dim=1 ) snake_case_ : str = F.group_norm(_UpperCamelCase ,self.num_groups ,eps=self.eps ) snake_case_ : List[str] = x * (1 + scale) + shift return x
360
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __A : List[Any] = logging.get_logger(__name__) __A : str = { 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class __UpperCamelCase ( lowercase__ ): lowercase : List[Any] = 'canine' def __init__( self :Optional[int] ,_UpperCamelCase :Dict=7_6_8 ,_UpperCamelCase :Union[str, Any]=1_2 ,_UpperCamelCase :int=1_2 ,_UpperCamelCase :int=3_0_7_2 ,_UpperCamelCase :int="gelu" ,_UpperCamelCase :Any=0.1 ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :Any=1_6_3_8_4 ,_UpperCamelCase :Tuple=1_6 ,_UpperCamelCase :List[str]=0.02 ,_UpperCamelCase :Any=1E-1_2 ,_UpperCamelCase :Tuple=0 ,_UpperCamelCase :List[str]=0xE_0_0_0 ,_UpperCamelCase :Optional[Any]=0xE_0_0_1 ,_UpperCamelCase :str=4 ,_UpperCamelCase :Optional[int]=4 ,_UpperCamelCase :str=8 ,_UpperCamelCase :int=1_6_3_8_4 ,_UpperCamelCase :int=1_2_8 ,**_UpperCamelCase :str ,): super().__init__(pad_token_id=_UpperCamelCase ,bos_token_id=_UpperCamelCase ,eos_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : List[str] = max_position_embeddings snake_case_ : Union[str, Any] = hidden_size snake_case_ : Dict = num_hidden_layers snake_case_ : Optional[int] = num_attention_heads snake_case_ : Tuple = intermediate_size snake_case_ : str = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : Optional[Any] = initializer_range snake_case_ : Optional[int] = type_vocab_size snake_case_ : List[str] = layer_norm_eps # Character config: snake_case_ : Any = downsampling_rate snake_case_ : List[str] = upsampling_kernel_size snake_case_ : int = num_hash_functions snake_case_ : Tuple = num_hash_buckets snake_case_ : Tuple = local_transformer_stride
8
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: __A : Any = None __A : Optional[Any] = logging.get_logger(__name__) __A : List[str] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} __A : Any = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', }, 'tokenizer_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json', }, } __A : List[Any] = { 'camembert-base': 512, } __A : Union[str, Any] = '▁' class __UpperCamelCase ( lowercase__ ): lowercase : Dict = VOCAB_FILES_NAMES lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[str] = ['input_ids', 'attention_mask'] lowercase : List[str] = CamembertTokenizer def __init__( self :Optional[Any] ,_UpperCamelCase :Dict=None ,_UpperCamelCase :Any=None ,_UpperCamelCase :List[Any]="<s>" ,_UpperCamelCase :List[str]="</s>" ,_UpperCamelCase :List[str]="</s>" ,_UpperCamelCase :Dict="<s>" ,_UpperCamelCase :str="<unk>" ,_UpperCamelCase :List[str]="<pad>" ,_UpperCamelCase :Union[str, Any]="<mask>" ,_UpperCamelCase :Optional[Any]=["<s>NOTUSED", "</s>NOTUSED"] ,**_UpperCamelCase :str ,): # Mask token behave like a normal word, i.e. include the space before it snake_case_ : List[str] = AddedToken(_UpperCamelCase ,lstrip=_UpperCamelCase ,rstrip=_UpperCamelCase ) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else mask_token super().__init__( _UpperCamelCase ,tokenizer_file=_UpperCamelCase ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,sep_token=_UpperCamelCase ,cls_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,pad_token=_UpperCamelCase ,mask_token=_UpperCamelCase ,additional_special_tokens=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Dict = vocab_file snake_case_ : Tuple = False if not self.vocab_file else True def a__ ( self :List[Any] ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ : Optional[int] = [self.cls_token_id] snake_case_ : Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a__ ( self :Dict ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : Optional[Any] = [self.sep_token_id] snake_case_ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self :Any ,_UpperCamelCase :str ,_UpperCamelCase :Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case_ : Any = os.path.join( _UpperCamelCase ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ): copyfile(self.vocab_file ,_UpperCamelCase ) return (out_vocab_file,)
361
'''simple docstring''' from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer __A : Tuple = logging.get_logger(__name__) __A : List[Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } __A : str = { 'vocab_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json' }, 'merges_file': { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt' }, 'tokenizer_config_file': { 'facebook/blenderbot_small-90M': ( 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json' ) }, } __A : Optional[Any] = { 'facebook/blenderbot_small-90M': 512, } class __UpperCamelCase ( lowercase__ ): lowercase : str = VOCAB_FILES_NAMES lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : Dict = BlenderbotSmallTokenizer def __init__( self :str ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :Tuple="<|endoftext|>" ,_UpperCamelCase :int="<|endoftext|>" ,_UpperCamelCase :Dict="<|endoftext|>" ,_UpperCamelCase :Optional[Any]=False ,_UpperCamelCase :List[Any]=True ,**_UpperCamelCase :Any ,): super().__init__( ByteLevelBPETokenizer( vocab=_UpperCamelCase ,merges=_UpperCamelCase ,add_prefix_space=_UpperCamelCase ,trim_offsets=_UpperCamelCase ,) ,bos_token=_UpperCamelCase ,eos_token=_UpperCamelCase ,unk_token=_UpperCamelCase ,**_UpperCamelCase ,) snake_case_ : Any = add_prefix_space def a__ ( self :Optional[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any]=None ): snake_case_ : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a__ ( self :int ,_UpperCamelCase :List[int] ,_UpperCamelCase :Optional[List[int]] = None ): snake_case_ : int = [self.sep_token_id] snake_case_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
8
0
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : List[Any] = generate_pascal_triangle(lowerCamelCase_ ) for row_idx in range(lowerCamelCase_ ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=""" """ ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=""" """ ) else: print(triangle[row_idx][col_idx] , end="""""" ) print() def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [] for current_row_idx in range(lowerCamelCase_ ): snake_case_ : List[str] = populate_current_row(lowerCamelCase_ , lowerCamelCase_ ) triangle.append(lowerCamelCase_ ) return triangle def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : Union[str, Any] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 snake_case_ : Optional[Any] = 1, 1 for current_col_idx in range(1 , lowerCamelCase_ ): calculate_current_element( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) return current_row def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :list[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ): '''simple docstring''' snake_case_ : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx - 1] snake_case_ : List[Any] = triangle[current_row_idx - 1][current_col_idx] snake_case_ : Optional[int] = above_to_left_elt + above_to_right_elt def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [[1]] for row_index in range(1 , lowerCamelCase_ ): snake_case_ : Optional[Any] = [0] + result[-1] + [0] snake_case_ : Dict = row_index + 1 # Calculate the number of distinct elements in a row snake_case_ : Any = sum(divmod(lowerCamelCase_ , 2 ) ) snake_case_ : Tuple = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] snake_case_ : Optional[int] = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() snake_case_ : str = row_first_half + row_second_half result.append(lowerCamelCase_ ) return result def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowerCamelCase_ :Callable , lowerCamelCase_ :int ) -> None: snake_case_ : Dict = F'''{func.__name__}({value})''' snake_case_ : Dict = timeit(F'''__main__.{call}''' , setup="""import __main__""" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'''{call:38} -- {timing:.4f} seconds''' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(lowerCamelCase_ , lowerCamelCase_ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
362
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :list ): '''simple docstring''' if len(lowerCamelCase_ ) <= 1: return lst snake_case_ : Union[str, Any] = 1 while i < len(lowerCamelCase_ ): if lst[i - 1] <= lst[i]: i += 1 else: snake_case_ , snake_case_ : Union[str, Any] = lst[i], lst[i - 1] i -= 1 if i == 0: snake_case_ : int = 1 return lst if __name__ == "__main__": __A : Optional[int] = input('Enter numbers separated by a comma:\n').strip() __A : int = [int(item) for item in user_input.split(',')] print(gnome_sort(unsorted))
8
0
'''simple docstring''' from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def UpperCAmelCase ( lowerCamelCase_ :str = "isbn/0140328726" ): '''simple docstring''' snake_case_ : Tuple = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes if new_olid.count("""/""" ) != 1: snake_case_ : int = F'''{olid} is not a valid Open Library olid''' raise ValueError(lowerCamelCase_ ) return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json() def UpperCAmelCase ( lowerCamelCase_ :dict ): '''simple docstring''' snake_case_ : List[str] = { """title""": """Title""", """publish_date""": """Publish date""", """authors""": """Authors""", """number_of_pages""": """Number of pages:""", """first_sentence""": """First sentence""", """isbn_10""": """ISBN (10)""", """isbn_13""": """ISBN (13)""", } snake_case_ : Optional[int] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} snake_case_ : List[Any] = [ get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""] ] snake_case_ : Optional[int] = data["""First sentence"""]["""value"""] for key, value in data.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): snake_case_ : str = """, """.join(lowerCamelCase_ ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: __A : Any = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.') continue print(F'\nSearching Open Library for ISBN: {isbn}...\n') try: __A : Any = summarize_book(get_openlibrary_data(F'isbn/{isbn}')) print('\n'.join(F'{key}: {value}' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(F'Sorry, there are no results for ISBN: {isbn}.')
363
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase : def __init__( self :Any ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Optional[int]=1_2 ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Optional[int]=True ,_UpperCamelCase :Union[str, Any]=True ,_UpperCamelCase :Dict=True ,_UpperCamelCase :Optional[int]=9_9 ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Union[str, Any]=3_2 ,_UpperCamelCase :Union[str, Any]=2 ,_UpperCamelCase :Optional[Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :Tuple=0.1 ,_UpperCamelCase :Optional[int]=0.1 ,_UpperCamelCase :int=5_1_2 ,_UpperCamelCase :Tuple=0.02 ,_UpperCamelCase :Any=0 ,_UpperCamelCase :str=None ,): snake_case_ : str = parent snake_case_ : int = batch_size snake_case_ : Union[str, Any] = seq_length snake_case_ : List[Any] = is_training snake_case_ : Union[str, Any] = use_input_mask snake_case_ : List[str] = use_labels snake_case_ : int = vocab_size snake_case_ : Any = hidden_size snake_case_ : List[Any] = projection_dim snake_case_ : Dict = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : str = intermediate_size snake_case_ : int = dropout snake_case_ : int = attention_dropout snake_case_ : Dict = max_position_embeddings snake_case_ : Union[str, Any] = initializer_range snake_case_ : Dict = scope snake_case_ : Union[str, Any] = bos_token_id def a__ ( self :Any ): snake_case_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case_ : Union[str, Any] = None if self.use_input_mask: snake_case_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: snake_case_ : int = input_mask.numpy() snake_case_ , snake_case_ : Tuple = input_mask.shape snake_case_ : Any = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) ) for batch_idx, start_index in enumerate(_UpperCamelCase ): snake_case_ : Optional[int] = 1 snake_case_ : List[str] = 0 snake_case_ : Tuple = self.get_config() return config, input_ids, tf.convert_to_tensor(_UpperCamelCase ) def a__ ( self :str ): return BlipTextConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,) def a__ ( self :List[Any] ,_UpperCamelCase :Union[str, Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Optional[int] ): snake_case_ : List[str] = TFBlipTextModel(config=_UpperCamelCase ) snake_case_ : List[Any] = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,training=_UpperCamelCase ) snake_case_ : Any = model(_UpperCamelCase ,training=_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def a__ ( self :List[str] ): snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ : str = config_and_inputs snake_case_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Optional[Any] = (TFBlipTextModel,) if is_tf_available() else () lowercase : int = False lowercase : List[Any] = False lowercase : Dict = False def a__ ( self :List[Any] ): snake_case_ : List[str] = BlipTextModelTester(self ) snake_case_ : Tuple = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=3_7 ) def a__ ( self :Union[str, Any] ): self.config_tester.run_common_tests() def a__ ( self :Union[str, Any] ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :Tuple ): pass def a__ ( self :Tuple ): pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def a__ ( self :Any ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :Tuple ): pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def a__ ( self :List[Any] ): pass @slow def a__ ( self :Any ): for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[Any] = TFBlipTextModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def a__ ( self :Dict ,_UpperCamelCase :Tuple=True ): super().test_pt_tf_model_equivalence(allow_missing_keys=_UpperCamelCase )
8
0
'''simple docstring''' from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __A : Dict = { 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class __UpperCamelCase ( lowercase__ ): lowercase : Optional[int] = 'ernie_m' lowercase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self :Optional[Any] ,_UpperCamelCase :int = 2_5_0_0_0_2 ,_UpperCamelCase :int = 7_6_8 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 1_2 ,_UpperCamelCase :int = 3_0_7_2 ,_UpperCamelCase :str = "gelu" ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :float = 0.1 ,_UpperCamelCase :int = 5_1_4 ,_UpperCamelCase :float = 0.02 ,_UpperCamelCase :int = 1 ,_UpperCamelCase :float = 1E-0_5 ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :List[str]=False ,_UpperCamelCase :Optional[int]=0.0 ,**_UpperCamelCase :List[Any] ,): super().__init__(pad_token_id=_UpperCamelCase ,**_UpperCamelCase ) snake_case_ : Optional[int] = vocab_size snake_case_ : Any = hidden_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : Any = intermediate_size snake_case_ : Any = hidden_act snake_case_ : Tuple = hidden_dropout_prob snake_case_ : Union[str, Any] = attention_probs_dropout_prob snake_case_ : str = max_position_embeddings snake_case_ : int = initializer_range snake_case_ : Optional[Any] = layer_norm_eps snake_case_ : Union[str, Any] = classifier_dropout snake_case_ : Tuple = is_decoder snake_case_ : int = act_dropout
364
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __A : int = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Any = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys __A : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
8
0
'''simple docstring''' from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) __A : Optional[Any] = _symbol_database.Default() __A : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile( b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03' ) __A : Dict = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS is False: __A : List[Any] = None __A : Union[str, Any] = b'H\003' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" __A : Dict = 45 __A : Optional[Any] = 1_581 __A : Dict = 1_517 __A : Tuple = 1_570 __A : List[Any] = 1_584 __A : Union[str, Any] = 1_793 __A : List[Any] = 1_795 __A : Optional[int] = 1_916 __A : List[str] = 1_864 __A : List[str] = 1_905 __A : Any = 1_919 __A : Any = 2_429 __A : Dict = 2_208 __A : Optional[int] = 2_418 __A : Optional[int] = 2_323 __A : Union[str, Any] = 2_407 # @@protoc_insertion_point(module_scope)
365
'''simple docstring''' import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor __A : Optional[int] = logging.get_logger(__name__) class __UpperCamelCase ( lowercase__ ): def __init__( self :List[str] ,*_UpperCamelCase :str ,**_UpperCamelCase :Optional[int] ): warnings.warn( """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use MobileViTImageProcessor instead.""" ,_UpperCamelCase ,) super().__init__(*_UpperCamelCase ,**_UpperCamelCase )
8
0
'''simple docstring''' import collections import os import re from pathlib import Path __A : Dict = 'src/transformers' # Matches is_xxx_available() __A : Dict = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __A : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __A : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __A : Optional[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __A : Optional[int] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __A : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __A : Union[str, Any] = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __A : int = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __A : int = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __A : List[Any] = re.compile(r'^\s*try:') # Catches a line with else: __A : Any = re.compile(r'^\s*else:') def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' if _re_test_backend.search(lowerCamelCase_ ) is None: return None snake_case_ : Tuple = [b[0] for b in _re_backend.findall(lowerCamelCase_ )] backends.sort() return "_and_".join(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ): '''simple docstring''' with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ : str = f.readlines() snake_case_ : List[Any] = 0 while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowerCamelCase_ ): return None # First grab the objects without a specific backend in _import_structure snake_case_ : Union[str, Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: snake_case_ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowerCamelCase_ ): snake_case_ : Optional[int] = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0] snake_case_ : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" , lowerCamelCase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue snake_case_ : Any = _re_import_struct_key_value.search(lowerCamelCase_ ) if single_line_import_search is not None: snake_case_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 snake_case_ : Union[str, Any] = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. snake_case_ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): snake_case_ : List[Any] = lines[line_index] if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None: objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] ) elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None: snake_case_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_between_brackets.search(lowerCamelCase_ ) is not None: snake_case_ : List[str] = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_quote_object.search(lowerCamelCase_ ) is not None: objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case_ : List[Any] = [] while ( line_index < len(lowerCamelCase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): snake_case_ : Union[str, Any] = lines[line_index] snake_case_ : Union[str, Any] = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case_ : Dict = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(lowerCamelCase_ ): # If the line is an if is_backend_available, we grab all objects associated. snake_case_ : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : str = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): snake_case_ : Dict = lines[line_index] snake_case_ : Any = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[str] ): '''simple docstring''' def find_duplicates(lowerCamelCase_ :Union[str, Any] ): return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case_ : Optional[int] = [] for key in import_dict_objects.keys(): snake_case_ : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) snake_case_ : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case_ : str = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Tuple = [] for root, _, files in os.walk(lowerCamelCase_ ): if "__init__.py" in files: snake_case_ : Any = os.path.join(lowerCamelCase_ , """__init__.py""" ) snake_case_ : Dict = parse_init(lowerCamelCase_ ) if objects is not None: snake_case_ : Any = analyze_results(*lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: snake_case_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(lowerCamelCase_ ) ) if len(lowerCamelCase_ ) > 0: raise ValueError("""\n\n""".join(lowerCamelCase_ ) ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Union[str, Any] = [] for path, directories, files in os.walk(lowerCamelCase_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(lowerCamelCase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowerCamelCase_ ) / folder).glob("""*.py""" ) ) ) == 0: continue snake_case_ : Tuple = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(os.path.sep , """.""" ) submodules.append(lowerCamelCase_ ) for fname in files: if fname == "__init__.py": continue snake_case_ : Dict = str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(lowerCamelCase_ ) return submodules __A : List[Any] = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def UpperCAmelCase ( ): '''simple docstring''' from transformers.utils import direct_transformers_import snake_case_ : Union[str, Any] = direct_transformers_import(lowerCamelCase_ ) snake_case_ : List[str] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(lowerCamelCase_ , """__init__.py""" ) , """r""" ) as f: snake_case_ : str = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase_ ) ) ) snake_case_ : Dict = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowerCamelCase_ ) > 0: snake_case_ : str = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
366
'''simple docstring''' import re def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : List[Any] = re.compile( R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" ) return bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) ) if __name__ == "__main__": __A : int = '0094702343221' print(is_sri_lankan_phone_number(phone))
8
0
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class __UpperCamelCase : def __init__( self :Tuple ,_UpperCamelCase :Any ,_UpperCamelCase :Optional[Any]=1_3 ,_UpperCamelCase :Union[str, Any]=3_0 ,_UpperCamelCase :int=2 ,_UpperCamelCase :Dict=3 ,_UpperCamelCase :Tuple=True ,_UpperCamelCase :List[str]=True ,_UpperCamelCase :Dict=3_2 ,_UpperCamelCase :Optional[int]=2 ,_UpperCamelCase :int=4 ,_UpperCamelCase :List[str]=3_7 ,_UpperCamelCase :Optional[Any]="gelu" ,_UpperCamelCase :Dict=0.1 ,_UpperCamelCase :Dict=0.1 ,_UpperCamelCase :List[str]=1_0 ,_UpperCamelCase :Optional[int]=0.02 ,_UpperCamelCase :Union[str, Any]=3 ,_UpperCamelCase :Optional[int]=None ,_UpperCamelCase :Dict=2 ,): snake_case_ : Optional[Any] = parent snake_case_ : int = batch_size snake_case_ : Dict = image_size snake_case_ : Union[str, Any] = patch_size snake_case_ : Optional[int] = num_channels snake_case_ : Tuple = is_training snake_case_ : List[Any] = use_labels snake_case_ : Any = hidden_size snake_case_ : Optional[Any] = num_hidden_layers snake_case_ : Dict = num_attention_heads snake_case_ : Optional[int] = intermediate_size snake_case_ : Tuple = hidden_act snake_case_ : List[Any] = hidden_dropout_prob snake_case_ : List[str] = attention_probs_dropout_prob snake_case_ : str = type_sequence_label_size snake_case_ : str = initializer_range snake_case_ : Optional[Any] = scope snake_case_ : Dict = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) snake_case_ : Dict = (image_size // patch_size) ** 2 snake_case_ : List[str] = num_patches + 2 def a__ ( self :Any ): snake_case_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : str = None if self.use_labels: snake_case_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) snake_case_ : List[Any] = self.get_config() return config, pixel_values, labels def a__ ( self :Any ): return DeiTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_UpperCamelCase ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,) def a__ ( self :List[str] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Optional[int] ): snake_case_ : List[str] = TFDeiTModel(config=_UpperCamelCase ) snake_case_ : List[str] = model(_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self :List[Any] ,_UpperCamelCase :str ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Any ): snake_case_ : List[Any] = TFDeiTForMaskedImageModeling(config=_UpperCamelCase ) snake_case_ : Union[str, Any] = model(_UpperCamelCase ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images snake_case_ : Dict = 1 snake_case_ : Dict = TFDeiTForMaskedImageModeling(_UpperCamelCase ) snake_case_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : List[Any] = model(_UpperCamelCase ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def a__ ( self :Union[str, Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :int ): snake_case_ : Tuple = self.type_sequence_label_size snake_case_ : Dict = TFDeiTForImageClassification(_UpperCamelCase ) snake_case_ : Tuple = model(_UpperCamelCase ,labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case_ : Dict = 1 snake_case_ : Optional[Any] = TFDeiTForImageClassification(_UpperCamelCase ) snake_case_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : Any = model(_UpperCamelCase ,labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def a__ ( self :Dict ): snake_case_ : Dict = self.prepare_config_and_inputs() snake_case_ : Optional[Any] = config_and_inputs snake_case_ : Any = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ): lowercase : List[str] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) lowercase : Tuple = ( { 'feature-extraction': TFDeiTModel, 'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) lowercase : List[str] = False lowercase : Dict = False lowercase : Any = False lowercase : Dict = False def a__ ( self :Optional[Any] ): snake_case_ : str = TFDeiTModelTester(self ) snake_case_ : Optional[int] = ConfigTester(self ,config_class=_UpperCamelCase ,has_text_modality=_UpperCamelCase ,hidden_size=3_7 ) def a__ ( self :int ): self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def a__ ( self :Any ): pass def a__ ( self :Optional[Any] ): snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[Any] = model_class(_UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) snake_case_ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCamelCase ,tf.keras.layers.Dense ) ) def a__ ( self :str ): snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Optional[int] = model_class(_UpperCamelCase ) snake_case_ : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : int = [*signature.parameters.keys()] snake_case_ : Optional[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,_UpperCamelCase ) def a__ ( self :Optional[int] ): snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def a__ ( self :List[Any] ): snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCamelCase ) def a__ ( self :Tuple ): snake_case_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase ) def a__ ( self :Tuple ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Tuple ,_UpperCamelCase :Union[str, Any]=False ): snake_case_ : str = super()._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ,return_labels=_UpperCamelCase ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def a__ ( self :Union[str, Any] ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : List[str] = TFDeiTModel.from_pretrained(_UpperCamelCase ) self.assertIsNotNone(_UpperCamelCase ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def a__ ( self :Any ): return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def a__ ( self :List[Any] ): snake_case_ : int = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) snake_case_ : Tuple = self.default_image_processor snake_case_ : List[str] = prepare_img() snake_case_ : Dict = image_processor(images=_UpperCamelCase ,return_tensors="""tf""" ) # forward pass snake_case_ : Optional[int] = model(**_UpperCamelCase ) # verify the logits snake_case_ : List[str] = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape ,_UpperCamelCase ) snake_case_ : Union[str, Any] = tf.constant([-1.02_66, 0.19_12, -1.28_61] ) self.assertTrue(np.allclose(outputs.logits[0, :3] ,_UpperCamelCase ,atol=1E-4 ) )
367
'''simple docstring''' from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class __UpperCamelCase ( lowercase__ ): lowercase : Union[List[PIL.Image.Image], np.ndarray] lowercase : Optional[List[bool]] if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
8
0
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __UpperCamelCase ( lowercase__ ): lowercase : torch.FloatTensor class __UpperCamelCase ( lowercase__ , lowercase__ ): @register_to_config def __init__( self :str ,_UpperCamelCase :int = 3 ,_UpperCamelCase :int = 3 ,_UpperCamelCase :Tuple[str] = ("DownEncoderBlock2D",) ,_UpperCamelCase :Tuple[str] = ("UpDecoderBlock2D",) ,_UpperCamelCase :Tuple[int] = (6_4,) ,_UpperCamelCase :int = 1 ,_UpperCamelCase :str = "silu" ,_UpperCamelCase :int = 3 ,_UpperCamelCase :int = 3_2 ,_UpperCamelCase :int = 2_5_6 ,_UpperCamelCase :int = 3_2 ,_UpperCamelCase :Optional[int] = None ,_UpperCamelCase :float = 0.1_82_15 ,_UpperCamelCase :str = "group" ,): super().__init__() # pass init params to Encoder snake_case_ : Optional[Any] = Encoder( in_channels=_UpperCamelCase ,out_channels=_UpperCamelCase ,down_block_types=_UpperCamelCase ,block_out_channels=_UpperCamelCase ,layers_per_block=_UpperCamelCase ,act_fn=_UpperCamelCase ,norm_num_groups=_UpperCamelCase ,double_z=_UpperCamelCase ,) snake_case_ : Dict = vq_embed_dim if vq_embed_dim is not None else latent_channels snake_case_ : str = nn.Convad(_UpperCamelCase ,_UpperCamelCase ,1 ) snake_case_ : int = VectorQuantizer(_UpperCamelCase ,_UpperCamelCase ,beta=0.25 ,remap=_UpperCamelCase ,sane_index_shape=_UpperCamelCase ) snake_case_ : List[str] = nn.Convad(_UpperCamelCase ,_UpperCamelCase ,1 ) # pass init params to Decoder snake_case_ : str = Decoder( in_channels=_UpperCamelCase ,out_channels=_UpperCamelCase ,up_block_types=_UpperCamelCase ,block_out_channels=_UpperCamelCase ,layers_per_block=_UpperCamelCase ,act_fn=_UpperCamelCase ,norm_num_groups=_UpperCamelCase ,norm_type=_UpperCamelCase ,) @apply_forward_hook def a__ ( self :Dict ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :bool = True ): snake_case_ : str = self.encoder(_UpperCamelCase ) snake_case_ : Optional[Any] = self.quant_conv(_UpperCamelCase ) if not return_dict: return (h,) return VQEncoderOutput(latents=_UpperCamelCase ) @apply_forward_hook def a__ ( self :Any ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :bool = False ,_UpperCamelCase :bool = True ): # also go through quantization layer if not force_not_quantize: snake_case_ : List[str] = self.quantize(_UpperCamelCase ) else: snake_case_ : List[str] = h snake_case_ : List[str] = self.post_quant_conv(_UpperCamelCase ) snake_case_ : str = self.decoder(_UpperCamelCase ,quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase ) def a__ ( self :int ,_UpperCamelCase :torch.FloatTensor ,_UpperCamelCase :bool = True ): snake_case_ : Any = sample snake_case_ : Any = self.encode(_UpperCamelCase ).latents snake_case_ : Tuple = self.decode(_UpperCamelCase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=_UpperCamelCase )
368
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ): lowercase : Dict = StableDiffusionInpaintPipeline lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS lowercase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase : Dict = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase : Optional[int] = frozenset([] ) def a__ ( self :Any ): torch.manual_seed(0 ) snake_case_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=9 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=3_2 ,attention_head_dim=(2, 4) ,use_linear_projection=_UpperCamelCase ,) snake_case_ : Tuple = PNDMScheduler(skip_prk_steps=_UpperCamelCase ) torch.manual_seed(0 ) snake_case_ : List[str] = AutoencoderKL( block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=1_2_8 ,) torch.manual_seed(0 ) snake_case_ : Optional[int] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act="""gelu""" ,projection_dim=5_1_2 ,) snake_case_ : Tuple = CLIPTextModel(_UpperCamelCase ) snake_case_ : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) snake_case_ : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self :str ,_UpperCamelCase :Optional[int] ,_UpperCamelCase :Union[str, Any]=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase ) snake_case_ : int = image.cpu().permute(0 ,2 ,3 ,1 )[0] snake_case_ : List[str] = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((6_4, 6_4) ) snake_case_ : Optional[Any] = Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((6_4, 6_4) ) if str(_UpperCamelCase ).startswith("""mps""" ): snake_case_ : Optional[Any] = torch.manual_seed(_UpperCamelCase ) else: snake_case_ : Optional[int] = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase ) snake_case_ : int = { """prompt""": """A painting of a squirrel eating a burger""", """image""": init_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def a__ ( self :Any ): snake_case_ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ : Optional[Any] = self.get_dummy_components() snake_case_ : Dict = StableDiffusionInpaintPipeline(**_UpperCamelCase ) snake_case_ : List[str] = sd_pipe.to(_UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCamelCase ) snake_case_ : Union[str, Any] = self.get_dummy_inputs(_UpperCamelCase ) snake_case_ : Tuple = sd_pipe(**_UpperCamelCase ).images snake_case_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) snake_case_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def a__ ( self :Any ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def a__ ( self :List[Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self :Tuple ): snake_case_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench.npy""" ) snake_case_ : str = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(_UpperCamelCase ,safety_checker=_UpperCamelCase ) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[str] = torch.manual_seed(0 ) snake_case_ : Dict = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : Union[str, Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9E-3 def a__ ( self :Tuple ): snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : List[str] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint""" """/yellow_cat_sitting_on_a_park_bench_fp16.npy""" ) snake_case_ : Optional[int] = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : List[str] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,torch_dtype=torch.floataa ,safety_checker=_UpperCamelCase ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing() snake_case_ : Optional[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : List[Any] = torch.manual_seed(0 ) snake_case_ : Any = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,output_type="""np""" ,) snake_case_ : List[str] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5E-1 def a__ ( self :Union[str, Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-inpaint/init_image.png""" ) snake_case_ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" ) snake_case_ : int = """stabilityai/stable-diffusion-2-inpainting""" snake_case_ : Dict = PNDMScheduler.from_pretrained(_UpperCamelCase ,subfolder="""scheduler""" ) snake_case_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained( _UpperCamelCase ,safety_checker=_UpperCamelCase ,scheduler=_UpperCamelCase ,torch_dtype=torch.floataa ,) pipe.to(_UpperCamelCase ) pipe.set_progress_bar_config(disable=_UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case_ : List[Any] = """Face of a yellow cat, high resolution, sitting on a park bench""" snake_case_ : Optional[int] = torch.manual_seed(0 ) snake_case_ : Tuple = pipe( prompt=_UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=2 ,output_type="""np""" ,) snake_case_ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
8
0
'''simple docstring''' __A : int = 256 # Modulus to hash a string __A : Any = 1_000_003 def UpperCAmelCase ( lowerCamelCase_ :str , lowerCamelCase_ :str ): '''simple docstring''' snake_case_ : int = len(lowerCamelCase_ ) snake_case_ : Optional[int] = len(lowerCamelCase_ ) if p_len > t_len: return False snake_case_ : Dict = 0 snake_case_ : int = 0 snake_case_ : int = 1 # Calculating the hash of pattern and substring of text for i in range(lowerCamelCase_ ): snake_case_ : Any = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus snake_case_ : Optional[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue snake_case_ : Any = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash snake_case_ : Optional[int] = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Union[str, Any] = """abc1abc12""" snake_case_ : Optional[int] = """alskfjaldsabc1abc1abc12k23adsfabcabc""" snake_case_ : List[Any] = """alskfjaldsk23adsfabcabc""" assert rabin_karp(lowerCamelCase_ , lowerCamelCase_ ) and not rabin_karp(lowerCamelCase_ , lowerCamelCase_ ) # Test 2) snake_case_ : List[Any] = """ABABX""" snake_case_ : List[Any] = """ABABZABABYABABX""" assert rabin_karp(lowerCamelCase_ , lowerCamelCase_ ) # Test 3) snake_case_ : Optional[int] = """AAAB""" snake_case_ : Optional[int] = """ABAAAAAB""" assert rabin_karp(lowerCamelCase_ , lowerCamelCase_ ) # Test 4) snake_case_ : Tuple = """abcdabcy""" snake_case_ : List[str] = """abcxabcdabxabcdabcdabcy""" assert rabin_karp(lowerCamelCase_ , lowerCamelCase_ ) # Test 5) snake_case_ : Union[str, Any] = """Lü""" snake_case_ : Tuple = """Lüsai""" assert rabin_karp(lowerCamelCase_ , lowerCamelCase_ ) snake_case_ : str = """Lue""" assert not rabin_karp(lowerCamelCase_ , lowerCamelCase_ ) print("""Success.""" ) if __name__ == "__main__": test_rabin_karp()
369
'''simple docstring''' import collections import os import re from pathlib import Path __A : Dict = 'src/transformers' # Matches is_xxx_available() __A : Dict = re.compile(r'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __A : Any = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __A : Tuple = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __A : Optional[Any] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __A : Optional[int] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __A : List[Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __A : Union[str, Any] = re.compile(r'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __A : int = re.compile(r'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __A : int = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __A : List[Any] = re.compile(r'^\s*try:') # Catches a line with else: __A : Any = re.compile(r'^\s*else:') def UpperCAmelCase ( lowerCamelCase_ :str ): '''simple docstring''' if _re_test_backend.search(lowerCamelCase_ ) is None: return None snake_case_ : Tuple = [b[0] for b in _re_backend.findall(lowerCamelCase_ )] backends.sort() return "_and_".join(lowerCamelCase_ ) def UpperCAmelCase ( lowerCamelCase_ :Optional[int] ): '''simple docstring''' with open(lowerCamelCase_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ : str = f.readlines() snake_case_ : List[Any] = 0 while line_index < len(lowerCamelCase_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(lowerCamelCase_ ): return None # First grab the objects without a specific backend in _import_structure snake_case_ : Union[str, Any] = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: snake_case_ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(lowerCamelCase_ ): snake_case_ : Optional[int] = _re_one_line_import_struct.search(lowerCamelCase_ ).groups()[0] snake_case_ : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" , lowerCamelCase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue snake_case_ : Any = _re_import_struct_key_value.search(lowerCamelCase_ ) if single_line_import_search is not None: snake_case_ : Optional[int] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 snake_case_ : Union[str, Any] = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. snake_case_ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Dict = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): snake_case_ : List[Any] = lines[line_index] if _re_import_struct_add_one.search(lowerCamelCase_ ) is not None: objects.append(_re_import_struct_add_one.search(lowerCamelCase_ ).groups()[0] ) elif _re_import_struct_add_many.search(lowerCamelCase_ ) is not None: snake_case_ : Optional[int] = _re_import_struct_add_many.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : List[str] = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_between_brackets.search(lowerCamelCase_ ) is not None: snake_case_ : List[str] = _re_between_brackets.search(lowerCamelCase_ ).groups()[0].split(""", """ ) snake_case_ : Any = [obj[1:-1] for obj in imports if len(lowerCamelCase_ ) > 0] objects.extend(lowerCamelCase_ ) elif _re_quote_object.search(lowerCamelCase_ ) is not None: objects.append(_re_quote_object.search(lowerCamelCase_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case_ : List[Any] = [] while ( line_index < len(lowerCamelCase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): snake_case_ : Union[str, Any] = lines[line_index] snake_case_ : Union[str, Any] = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case_ : Dict = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(lowerCamelCase_ ): # If the line is an if is_backend_available, we grab all objects associated. snake_case_ : Optional[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ : str = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): snake_case_ : Dict = lines[line_index] snake_case_ : Any = _re_import.search(lowerCamelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case_ : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def UpperCAmelCase ( lowerCamelCase_ :int , lowerCamelCase_ :List[str] ): '''simple docstring''' def find_duplicates(lowerCamelCase_ :Union[str, Any] ): return [k for k, v in collections.Counter(lowerCamelCase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case_ : Optional[int] = [] for key in import_dict_objects.keys(): snake_case_ : int = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) snake_case_ : List[str] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case_ : str = """base imports""" if key == """none""" else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Tuple = [] for root, _, files in os.walk(lowerCamelCase_ ): if "__init__.py" in files: snake_case_ : Any = os.path.join(lowerCamelCase_ , """__init__.py""" ) snake_case_ : Dict = parse_init(lowerCamelCase_ ) if objects is not None: snake_case_ : Any = analyze_results(*lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: snake_case_ : Tuple = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(lowerCamelCase_ ) ) if len(lowerCamelCase_ ) > 0: raise ValueError("""\n\n""".join(lowerCamelCase_ ) ) def UpperCAmelCase ( ): '''simple docstring''' snake_case_ : Union[str, Any] = [] for path, directories, files in os.walk(lowerCamelCase_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(lowerCamelCase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(lowerCamelCase_ ) / folder).glob("""*.py""" ) ) ) == 0: continue snake_case_ : Tuple = str((Path(lowerCamelCase_ ) / folder).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(os.path.sep , """.""" ) submodules.append(lowerCamelCase_ ) for fname in files: if fname == "__init__.py": continue snake_case_ : Dict = str((Path(lowerCamelCase_ ) / fname).relative_to(lowerCamelCase_ ) ) snake_case_ : List[str] = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(lowerCamelCase_ ) return submodules __A : List[Any] = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def UpperCAmelCase ( ): '''simple docstring''' # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import snake_case_ : Union[str, Any] = direct_transformers_import(lowerCamelCase_ ) snake_case_ : List[str] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(lowerCamelCase_ , """__init__.py""" ) , """r""" ) as f: snake_case_ : str = f.read() import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" , lowerCamelCase_ ) ) ) snake_case_ : Dict = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(lowerCamelCase_ ) > 0: snake_case_ : str = """\n""".join(F'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registed in the main init of Transformers:\n""" F'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
8
0
'''simple docstring''' import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class __UpperCamelCase ( lowercase__ ): lowercase : Optional[Any] = 'char' lowercase : Optional[Any] = 'bpe' lowercase : List[str] = 'wp' __A : Optional[int] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class __UpperCamelCase ( lowercase__ ): lowercase : Tuple = ['image_processor', 'char_tokenizer'] lowercase : Optional[int] = 'ViTImageProcessor' lowercase : Tuple = 'MgpstrTokenizer' def __init__( self :Dict ,_UpperCamelCase :Tuple=None ,_UpperCamelCase :str=None ,**_UpperCamelCase :Union[str, Any] ): snake_case_ : Any = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_UpperCamelCase ,) snake_case_ : Any = kwargs.pop("""feature_extractor""" ) snake_case_ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) snake_case_ : List[Any] = tokenizer snake_case_ : List[str] = AutoTokenizer.from_pretrained("""gpt2""" ) snake_case_ : Optional[int] = AutoTokenizer.from_pretrained("""bert-base-uncased""" ) super().__init__(_UpperCamelCase ,_UpperCamelCase ) def __call__( self :str ,_UpperCamelCase :Any=None ,_UpperCamelCase :List[Any]=None ,_UpperCamelCase :str=None ,**_UpperCamelCase :Optional[int] ): if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: snake_case_ : Union[str, Any] = self.image_processor(_UpperCamelCase ,return_tensors=_UpperCamelCase ,**_UpperCamelCase ) if text is not None: snake_case_ : Union[str, Any] = self.char_tokenizer(_UpperCamelCase ,return_tensors=_UpperCamelCase ,**_UpperCamelCase ) if text is None: return inputs elif images is None: return encodings else: snake_case_ : Optional[int] = encodings["""input_ids"""] return inputs def a__ ( self :Union[str, Any] ,_UpperCamelCase :Union[str, Any] ): snake_case_ : List[Any] = sequences snake_case_ : Any = char_preds.size(0 ) snake_case_ : int = self._decode_helper(_UpperCamelCase ,"""char""" ) snake_case_ : Dict = self._decode_helper(_UpperCamelCase ,"""bpe""" ) snake_case_ : Optional[Any] = self._decode_helper(_UpperCamelCase ,"""wp""" ) snake_case_ : Optional[int] = [] snake_case_ : Optional[Any] = [] for i in range(_UpperCamelCase ): snake_case_ : Optional[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]] snake_case_ : Any = [char_strs[i], bpe_strs[i], wp_strs[i]] snake_case_ : str = scores.index(max(_UpperCamelCase ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) snake_case_ : Any = {} snake_case_ : Tuple = final_strs snake_case_ : List[Any] = final_scores snake_case_ : Any = char_strs snake_case_ : int = bpe_strs snake_case_ : Union[str, Any] = wp_strs return out def a__ ( self :Union[str, Any] ,_UpperCamelCase :str ,_UpperCamelCase :Union[str, Any] ): if format == DecodeType.CHARACTER: snake_case_ : Optional[Any] = self.char_decode snake_case_ : int = 1 snake_case_ : str = """[s]""" elif format == DecodeType.BPE: snake_case_ : Optional[int] = self.bpe_decode snake_case_ : Optional[Any] = 2 snake_case_ : List[str] = """#""" elif format == DecodeType.WORDPIECE: snake_case_ : Optional[Any] = self.wp_decode snake_case_ : List[Any] = 1_0_2 snake_case_ : Dict = """[SEP]""" else: raise ValueError(F'''Format {format} is not supported.''' ) snake_case_ : List[Any] = [], [] snake_case_ : Tuple = pred_logits.size(0 ) snake_case_ : Union[str, Any] = pred_logits.size(1 ) snake_case_ : List[Any] = pred_logits.topk(1 ,dim=-1 ,largest=_UpperCamelCase ,sorted=_UpperCamelCase ) snake_case_ : Any = preds_index.view(-1 ,_UpperCamelCase )[:, 1:] snake_case_ : List[str] = decoder(_UpperCamelCase ) snake_case_ : Tuple = torch.nn.functional.softmax(_UpperCamelCase ,dim=2 ).max(dim=2 ) snake_case_ : Optional[int] = preds_max_prob[:, 1:] for index in range(_UpperCamelCase ): snake_case_ : Any = preds_str[index].find(_UpperCamelCase ) snake_case_ : Tuple = preds_str[index][:pred_eos] snake_case_ : Optional[Any] = preds_index[index].cpu().tolist() snake_case_ : Union[str, Any] = pred_index.index(_UpperCamelCase ) if eos_token in pred_index else -1 snake_case_ : Optional[Any] = preds_max_prob[index][: pred_eos_index + 1] snake_case_ : Optional[Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(_UpperCamelCase ) conf_scores.append(_UpperCamelCase ) return dec_strs, conf_scores def a__ ( self :Any ,_UpperCamelCase :List[Any] ): snake_case_ : List[Any] = [seq.replace(""" """ ,"""""" ) for seq in self.char_tokenizer.batch_decode(_UpperCamelCase )] return decode_strs def a__ ( self :List[str] ,_UpperCamelCase :Dict ): return self.bpe_tokenizer.batch_decode(_UpperCamelCase ) def a__ ( self :str ,_UpperCamelCase :List[Any] ): snake_case_ : int = [seq.replace(""" """ ,"""""" ) for seq in self.wp_tokenizer.batch_decode(_UpperCamelCase )] return decode_strs
370
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self :List[Any] ,_UpperCamelCase :List[str] ,_UpperCamelCase :Optional[Any]=7 ,_UpperCamelCase :Union[str, Any]=3 ,_UpperCamelCase :Any=1_8 ,_UpperCamelCase :Optional[Any]=3_0 ,_UpperCamelCase :List[str]=4_0_0 ,_UpperCamelCase :Optional[Any]=True ,_UpperCamelCase :Union[str, Any]=None ,_UpperCamelCase :List[Any]=True ,): snake_case_ : List[str] = size if size is not None else {"""height""": 1_8, """width""": 1_8} snake_case_ : Union[str, Any] = parent snake_case_ : str = batch_size snake_case_ : List[Any] = num_channels snake_case_ : Tuple = image_size snake_case_ : int = min_resolution snake_case_ : int = max_resolution snake_case_ : Union[str, Any] = do_resize snake_case_ : Optional[Any] = size snake_case_ : Any = apply_ocr def a__ ( self :Union[str, Any] ): return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class __UpperCamelCase ( lowercase__ , unittest.TestCase ): lowercase : Tuple = LayoutLMvaImageProcessor if is_pytesseract_available() else None def a__ ( self :List[Any] ): snake_case_ : Union[str, Any] = LayoutLMvaImageProcessingTester(self ) @property def a__ ( self :int ): return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self :Any ): snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCamelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""size""" ) ) self.assertTrue(hasattr(_UpperCamelCase ,"""apply_ocr""" ) ) def a__ ( self :int ): snake_case_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""height""": 1_8, """width""": 1_8} ) snake_case_ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ,size=4_2 ) self.assertEqual(image_processor.size ,{"""height""": 4_2, """width""": 4_2} ) def a__ ( self :Optional[Any] ): pass def a__ ( self :Union[str, Any] ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,Image.Image ) # Test not batched input snake_case_ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ) self.assertEqual( encoding.pixel_values.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) self.assertIsInstance(encoding.words ,_UpperCamelCase ) self.assertIsInstance(encoding.boxes ,_UpperCamelCase ) # Test batched snake_case_ : List[Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Tuple ): # Initialize image_processing snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,numpify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,np.ndarray ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Any = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :Optional[Any] ): # Initialize image_processing snake_case_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCamelCase ,torchify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase ,torch.Tensor ) # Test not batched input snake_case_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) # Test batched snake_case_ : Union[str, Any] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) ,) def a__ ( self :List[Any] ): # with apply_OCR = True snake_case_ : Any = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case_ : List[Any] = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" ) snake_case_ : str = Image.open(ds[0]["""file"""] ).convert("""RGB""" ) snake_case_ : Dict = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) ) self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case_ : Tuple = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 snake_case_ : Any = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words ,_UpperCamelCase ) self.assertListEqual(encoding.boxes ,_UpperCamelCase ) # with apply_OCR = False snake_case_ : Dict = LayoutLMvaImageProcessor(apply_ocr=_UpperCamelCase ) snake_case_ : Optional[int] = image_processing(_UpperCamelCase ,return_tensors="""pt""" ) self.assertEqual(encoding.pixel_values.shape ,(1, 3, 2_2_4, 2_2_4) )
8
0
'''simple docstring''' class __UpperCamelCase : def __init__( self :Any ,_UpperCamelCase :int ): snake_case_ : List[str] = n snake_case_ : List[Any] = [None] * self.n snake_case_ : Optional[Any] = 0 # index of the first element snake_case_ : Union[str, Any] = 0 snake_case_ : int = 0 def __len__( self :Union[str, Any] ): return self.size def a__ ( self :Optional[int] ): return self.size == 0 def a__ ( self :str ): return False if self.is_empty() else self.array[self.front] def a__ ( self :List[str] ,_UpperCamelCase :List[str] ): if self.size >= self.n: raise Exception("""QUEUE IS FULL""" ) snake_case_ : Tuple = data snake_case_ : List[str] = (self.rear + 1) % self.n self.size += 1 return self def a__ ( self :Union[str, Any] ): if self.size == 0: raise Exception("""UNDERFLOW""" ) snake_case_ : Union[str, Any] = self.array[self.front] snake_case_ : Optional[Any] = None snake_case_ : Optional[Any] = (self.front + 1) % self.n self.size -= 1 return temp
371
'''simple docstring''' def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : List[Any] = generate_pascal_triangle(lowerCamelCase_ ) for row_idx in range(lowerCamelCase_ ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=""" """ ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=""" """ ) else: print(triangle[row_idx][col_idx] , end="""""" ) print() def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [] for current_row_idx in range(lowerCamelCase_ ): snake_case_ : List[str] = populate_current_row(lowerCamelCase_ , lowerCamelCase_ ) triangle.append(lowerCamelCase_ ) return triangle def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :int ): '''simple docstring''' snake_case_ : Union[str, Any] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 snake_case_ , snake_case_ : Optional[Any] = 1, 1 for current_col_idx in range(1 , lowerCamelCase_ ): calculate_current_element( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) return current_row def UpperCAmelCase ( lowerCamelCase_ :list[list[int]] , lowerCamelCase_ :list[int] , lowerCamelCase_ :int , lowerCamelCase_ :int , ): '''simple docstring''' snake_case_ : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx - 1] snake_case_ : List[Any] = triangle[current_row_idx - 1][current_col_idx] snake_case_ : Optional[int] = above_to_left_elt + above_to_right_elt def UpperCAmelCase ( lowerCamelCase_ :int ): '''simple docstring''' if not isinstance(lowerCamelCase_ , lowerCamelCase_ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) snake_case_ : list[list[int]] = [[1]] for row_index in range(1 , lowerCamelCase_ ): snake_case_ : Optional[Any] = [0] + result[-1] + [0] snake_case_ : Dict = row_index + 1 # Calculate the number of distinct elements in a row snake_case_ : Any = sum(divmod(lowerCamelCase_ , 2 ) ) snake_case_ : Tuple = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] snake_case_ : Optional[int] = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() snake_case_ : str = row_first_half + row_second_half result.append(lowerCamelCase_ ) return result def UpperCAmelCase ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(lowerCamelCase_ :Callable , lowerCamelCase_ :int ) -> None: snake_case_ : Dict = F'''{func.__name__}({value})''' snake_case_ : Dict = timeit(F'''__main__.{call}''' , setup="""import __main__""" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'''{call:38} -- {timing:.4f} seconds''' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(lowerCamelCase_ , lowerCamelCase_ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
8
0
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : List[str] = logging.get_logger(__name__) _UpperCAmelCase : int = { """facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""", # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class a__ ( __A ): """simple docstring""" __UpperCamelCase : int = 'wav2vec2' def __init__(self , __lowercase=32 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0_2 , __lowercase=1e-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=1_28 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.0_5 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=3_20 , __lowercase=2 , __lowercase=0.1 , __lowercase=1_00 , __lowercase=2_56 , __lowercase=2_56 , __lowercase=0.1 , __lowercase="sum" , __lowercase=False , __lowercase=False , __lowercase=2_56 , __lowercase=(5_12, 5_12, 5_12, 5_12, 15_00) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=5_12 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , __lowercase=None , **__lowercase , ): super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase ) __lowerCAmelCase = hidden_size __lowerCAmelCase = feat_extract_norm __lowerCAmelCase = feat_extract_activation __lowerCAmelCase = list(__lowercase ) __lowerCAmelCase = list(__lowercase ) __lowerCAmelCase = list(__lowercase ) __lowerCAmelCase = conv_bias __lowerCAmelCase = num_conv_pos_embeddings __lowerCAmelCase = num_conv_pos_embedding_groups __lowerCAmelCase = len(self.conv_dim ) __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_dropout __lowerCAmelCase = attention_dropout __lowerCAmelCase = activation_dropout __lowerCAmelCase = feat_proj_dropout __lowerCAmelCase = final_dropout __lowerCAmelCase = layerdrop __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = initializer_range __lowerCAmelCase = vocab_size __lowerCAmelCase = do_stable_layer_norm __lowerCAmelCase = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCAmelCase = apply_spec_augment __lowerCAmelCase = mask_time_prob __lowerCAmelCase = mask_time_length __lowerCAmelCase = mask_time_min_masks __lowerCAmelCase = mask_feature_prob __lowerCAmelCase = mask_feature_length __lowerCAmelCase = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __lowerCAmelCase = num_codevectors_per_group __lowerCAmelCase = num_codevector_groups __lowerCAmelCase = contrastive_logits_temperature __lowerCAmelCase = feat_quantizer_dropout __lowerCAmelCase = num_negatives __lowerCAmelCase = codevector_dim __lowerCAmelCase = proj_codevector_dim __lowerCAmelCase = diversity_loss_weight # ctc loss __lowerCAmelCase = ctc_loss_reduction __lowerCAmelCase = ctc_zero_infinity # adapter __lowerCAmelCase = add_adapter __lowerCAmelCase = adapter_kernel_size __lowerCAmelCase = adapter_stride __lowerCAmelCase = num_adapter_layers __lowerCAmelCase = output_hidden_size or hidden_size __lowerCAmelCase = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. __lowerCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __lowerCAmelCase = list(__lowercase ) __lowerCAmelCase = list(__lowercase ) __lowerCAmelCase = list(__lowercase ) __lowerCAmelCase = xvector_output_dim @property def _snake_case (self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
9
'''simple docstring''' import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = old_name if "patch_embed" in old_name: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''') if layer == "0": __lowerCAmelCase = old_name.replace('''0''', '''convolution1''') elif layer == "1": __lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''') elif layer == "3": __lowerCAmelCase = old_name.replace('''3''', '''convolution2''') else: __lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''') if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase): __lowerCAmelCase = r'''\b\d{2}\b''' if bool(re.search(lowerCamelCase, lowerCamelCase)): __lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group() else: __lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group() if int(match[0]) < 6: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') __lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1]) __lowerCAmelCase = '''intermediate_stages.''' + trimmed_name else: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') if int(match[2]) < num_meta4D_last_stage: __lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2]) else: __lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage) __lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index) if "norm1" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''') elif "norm2" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''') elif "fc1" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''') elif "fc2" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''') __lowerCAmelCase = '''last_stage.''' + trimmed_name elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase): __lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''') if "fc" in new_name: __lowerCAmelCase = new_name.replace('''fc''', '''convolution''') elif ("norm1" in new_name) and ("layernorm1" not in new_name): __lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''') elif ("norm2" in new_name) and ("layernorm2" not in new_name): __lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''') if "proj" in new_name: __lowerCAmelCase = new_name.replace('''proj''', '''projection''') if "dist_head" in new_name: __lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''') elif "head" in new_name: __lowerCAmelCase = new_name.replace('''head''', '''classifier''') elif "patch_embed" in new_name: __lowerCAmelCase = '''efficientformer.''' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": __lowerCAmelCase = new_name.replace('''norm''', '''layernorm''') __lowerCAmelCase = '''efficientformer.''' + new_name else: __lowerCAmelCase = '''efficientformer.encoder.''' + new_name return new_name def __magic_name__( lowerCamelCase, lowerCamelCase): for key in checkpoint.copy().keys(): __lowerCAmelCase = checkpoint.pop(lowerCamelCase) __lowerCAmelCase = val return checkpoint def __magic_name__( ): __lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw) return image def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model'''] __lowerCAmelCase = EfficientFormerConfig.from_json_file(lowerCamelCase) __lowerCAmelCase = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase) __lowerCAmelCase = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1]) __lowerCAmelCase = config.depths[-1] - config.num_metaad_blocks + 1 __lowerCAmelCase = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase) model.load_state_dict(lowerCamelCase) model.eval() __lowerCAmelCase = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } # prepare image __lowerCAmelCase = prepare_img() __lowerCAmelCase = 2_5_6 __lowerCAmelCase = 2_2_4 __lowerCAmelCase = EfficientFormerImageProcessor( size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], ) __lowerCAmelCase = processor(images=lowerCamelCase, return_tensors='''pt''').pixel_values # original processing pipeline __lowerCAmelCase = Compose( [ Resize(lowerCamelCase, interpolation=pillow_resamplings['''bicubic''']), CenterCrop(lowerCamelCase), ToTensor(), Normalize(lowerCamelCase, lowerCamelCase), ]) __lowerCAmelCase = image_transforms(lowerCamelCase).unsqueeze(0) assert torch.allclose(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = model(lowerCamelCase) __lowerCAmelCase = outputs.logits __lowerCAmelCase = (1, 1_0_0_0) if "l1" in model_name: __lowerCAmelCase = torch.Tensor( [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l3" in model_name: __lowerCAmelCase = torch.Tensor( [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l7" in model_name: __lowerCAmelCase = torch.Tensor( [-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78]) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""") # Save Checkpoints Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) model.save_pretrained(lowerCamelCase) print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""") processor.save_pretrained(lowerCamelCase) print(F"""Processor successfuly saved at {pytorch_dump_path}""") if push_to_hub: print('''Pushing model to the hub...''') model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=lowerCamelCase, ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=lowerCamelCase, ) if __name__ == "__main__": _UpperCAmelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to EfficientFormer pytorch checkpoint.""", ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for EfficientFormer model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) parser.set_defaults(push_to_hub=True) _UpperCAmelCase : List[str] = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
9
1
'''simple docstring''' import numpy as np from scipy.spatial.distance import cdist from sklearn.metrics import fa_score import datasets _UpperCAmelCase : Tuple = """\ @inproceedings{kakwani2020indicnlpsuite, title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}}, author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar}, year={2020}, booktitle={Findings of EMNLP}, } """ _UpperCAmelCase : Tuple = """\ IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. """ _UpperCAmelCase : Union[str, Any] = """ Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset. Args: predictions: list of predictions to score (as int64), except for 'cvit-mkb-clsr' where each prediction is a vector (of float32). references: list of ground truth labels corresponding to the predictions (as int64), except for 'cvit-mkb-clsr' where each reference is a vector (of float32). Returns: depending on the IndicGLUE subset, one or several of: \"accuracy\": Accuracy \"f1\": F1 score \"precision\": Precision@10 Examples: >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"] >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner') >>> references = [0, 1] >>> predictions = [0, 1] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'accuracy': 1.0, 'f1': 1.0} >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr') >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]] >>> results = indic_glue_metric.compute(predictions=predictions, references=references) >>> print(results) {'precision@10': 1.0} """ def __magic_name__( lowerCamelCase, lowerCamelCase): return float((preds == labels).mean()) def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = simple_accuracy(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = float(fa_score(y_true=lowerCamelCase, y_pred=lowerCamelCase)) return { "accuracy": acc, "f1": fa, } def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = np.array(lowerCamelCase) __lowerCAmelCase = np.array(lowerCamelCase) __lowerCAmelCase = en_sentvecs.shape[0] # mean centering __lowerCAmelCase = en_sentvecs - np.mean(lowerCamelCase, axis=0) __lowerCAmelCase = in_sentvecs - np.mean(lowerCamelCase, axis=0) __lowerCAmelCase = cdist(lowerCamelCase, lowerCamelCase, '''cosine''') __lowerCAmelCase = np.array(range(lowerCamelCase)) __lowerCAmelCase = sim.argsort(axis=1)[:, :1_0] __lowerCAmelCase = np.any(preds == actual[:, None], axis=1) return float(matches.mean()) @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): """simple docstring""" def _snake_case (self ): if self.config_name not in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", "wiki-ner", ]: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), '''references''': datasets.Value('''int64''' ) if self.config_name != '''cvit-mkb-clsr''' else datasets.Sequence(datasets.Value('''float32''' ) ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , ) def _snake_case (self , __lowercase , __lowercase ): if self.config_name == "cvit-mkb-clsr": return {"precision@10": precision_at_aa(__lowercase , __lowercase )} elif self.config_name in ["wiki-ner"]: return acc_and_fa(__lowercase , __lowercase ) elif self.config_name in [ "wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md", ]: return {"accuracy": simple_accuracy(__lowercase , __lowercase )} else: raise KeyError( '''You should supply a configuration name selected in ''' '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", ''' '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", ''' '''"wiki-ner"]''' )
9
'''simple docstring''' from __future__ import annotations import math def __magic_name__( lowerCamelCase, lowerCamelCase): if len(lowerCamelCase) != 2 or len(a[0]) != 2 or len(lowerCamelCase) != 2 or len(b[0]) != 2: raise Exception('''Matrices are not 2x2''') __lowerCAmelCase = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def __magic_name__( lowerCamelCase, lowerCamelCase): return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))] for row in range(len(lowerCamelCase)) ] def __magic_name__( lowerCamelCase, lowerCamelCase): return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))] for row in range(len(lowerCamelCase)) ] def __magic_name__( lowerCamelCase): if len(lowerCamelCase) % 2 != 0 or len(a[0]) % 2 != 0: raise Exception('''Odd matrices are not supported!''') __lowerCAmelCase = len(lowerCamelCase) __lowerCAmelCase = matrix_length // 2 __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase)] __lowerCAmelCase = [ [a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase) ] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase)] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase)] return top_left, top_right, bot_left, bot_right def __magic_name__( lowerCamelCase): return len(lowerCamelCase), len(matrix[0]) def __magic_name__( lowerCamelCase): print('''\n'''.join(str(lowerCamelCase) for line in matrix)) def __magic_name__( lowerCamelCase, lowerCamelCase): if matrix_dimensions(lowerCamelCase) == (2, 2): return default_matrix_multiplication(lowerCamelCase, lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase) __lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase) __lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase) __lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase) # construct the new matrix from our 4 quadrants __lowerCAmelCase = [] for i in range(len(lowerCamelCase)): new_matrix.append(top_left[i] + top_right[i]) for i in range(len(lowerCamelCase)): new_matrix.append(bot_left[i] + bot_right[i]) return new_matrix def __magic_name__( lowerCamelCase, lowerCamelCase): if matrix_dimensions(lowerCamelCase)[1] != matrix_dimensions(lowerCamelCase)[0]: __lowerCAmelCase = ( '''Unable to multiply these matrices, please check the dimensions.\n''' F"""Matrix A: {matrixa}\n""" F"""Matrix B: {matrixa}""" ) raise Exception(lowerCamelCase) __lowerCAmelCase = matrix_dimensions(lowerCamelCase) __lowerCAmelCase = matrix_dimensions(lowerCamelCase) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __lowerCAmelCase = max(*lowerCamelCase, *lowerCamelCase) __lowerCAmelCase = int(math.pow(2, math.ceil(math.loga(lowerCamelCase)))) __lowerCAmelCase = matrixa __lowerCAmelCase = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0, lowerCamelCase): if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): new_matrixa[i].append(0) else: new_matrixa.append([0] * maxim) if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): new_matrixa[i].append(0) else: new_matrixa.append([0] * maxim) __lowerCAmelCase = actual_strassen(lowerCamelCase, lowerCamelCase) # Removing the additional zeros for i in range(0, lowerCamelCase): if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": _UpperCAmelCase : List[str] = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] _UpperCAmelCase : Optional[Any] = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]] print(strassen(matrixa, matrixa))
9
1
'''simple docstring''' import argparse import datetime def __magic_name__( lowerCamelCase): __lowerCAmelCase = { '''0''': '''Sunday''', '''1''': '''Monday''', '''2''': '''Tuesday''', '''3''': '''Wednesday''', '''4''': '''Thursday''', '''5''': '''Friday''', '''6''': '''Saturday''', } __lowerCAmelCase = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowerCamelCase) < 1_1: raise ValueError('''Must be 10 characters long''') # Get month __lowerCAmelCase = int(date_input[0] + date_input[1]) # Validate if not 0 < m < 1_3: raise ValueError('''Month must be between 1 - 12''') __lowerCAmelCase = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''') # Get day __lowerCAmelCase = int(date_input[3] + date_input[4]) # Validate if not 0 < d < 3_2: raise ValueError('''Date must be between 1 - 31''') # Get second separator __lowerCAmelCase = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''') # Get year __lowerCAmelCase = int(date_input[6] + date_input[7] + date_input[8] + date_input[9]) # Arbitrary year range if not 4_5 < y < 8_5_0_0: raise ValueError( '''Year out of range. There has to be some sort of limit...right?''') # Get datetime obj for validation __lowerCAmelCase = datetime.date(int(lowerCamelCase), int(lowerCamelCase), int(lowerCamelCase)) # Start math if m <= 2: __lowerCAmelCase = y - 1 __lowerCAmelCase = m + 1_2 # maths var __lowerCAmelCase = int(str(lowerCamelCase)[:2]) __lowerCAmelCase = int(str(lowerCamelCase)[2:]) __lowerCAmelCase = int(2.6 * m - 5.39) __lowerCAmelCase = int(c / 4) __lowerCAmelCase = int(k / 4) __lowerCAmelCase = int(d + k) __lowerCAmelCase = int(t + u + v + x) __lowerCAmelCase = int(z - (2 * c)) __lowerCAmelCase = round(w % 7) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('''The date was evaluated incorrectly. Contact developer.''') # Response __lowerCAmelCase = F"""Your date {date_input}, is a {days[str(lowerCamelCase)]}!""" return response if __name__ == "__main__": import doctest doctest.testmod() _UpperCAmelCase : List[str] = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) _UpperCAmelCase : Dict = parser.parse_args() zeller(args.date_input)
9
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowercase ) self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 ) __lowerCAmelCase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = tokenizer(__lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
9
1
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs _UpperCAmelCase : List[Any] = imread(r"""digital_image_processing/image_data/lena_small.jpg""") _UpperCAmelCase : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY) def __magic_name__( ): __lowerCAmelCase = cn.convert_to_negative(lowerCamelCase) # assert negative_img array for at least one True assert negative_img.any() def __magic_name__( ): with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img: # Work around assertion for response assert str(cc.change_contrast(lowerCamelCase, 1_1_0)).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''') def __magic_name__( ): __lowerCAmelCase = canny.gen_gaussian_kernel(9, sigma=1.4) # Assert ambiguous array assert resp.all() def __magic_name__( ): __lowerCAmelCase = imread('''digital_image_processing/image_data/lena_small.jpg''', 0) # assert ambiguous array for all == True assert canny_img.all() __lowerCAmelCase = canny.canny(lowerCamelCase) # assert canny array for at least one True assert canny_array.any() def __magic_name__( ): assert gg.gaussian_filter(lowerCamelCase, 5, sigma=0.9).all() def __magic_name__( ): # laplace diagonals __lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]]) __lowerCAmelCase = conv.img_convolve(lowerCamelCase, lowerCamelCase).astype(lowerCamelCase) assert res.any() def __magic_name__( ): assert med.median_filter(lowerCamelCase, 3).any() def __magic_name__( ): __lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(lowerCamelCase) assert grad.any() and theta.any() def __magic_name__( ): __lowerCAmelCase = sp.make_sepia(lowerCamelCase, 2_0) assert sepia.all() def __magic_name__( lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg"): __lowerCAmelCase = bs.Burkes(imread(lowerCamelCase, 1), 1_2_0) burkes.process() assert burkes.output_img.any() def __magic_name__( lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg", ): __lowerCAmelCase = rs.NearestNeighbour(imread(lowerCamelCase, 1), 4_0_0, 2_0_0) nn.process() assert nn.output.any() def __magic_name__( ): __lowerCAmelCase = '''digital_image_processing/image_data/lena.jpg''' # Reading the image and converting it to grayscale. __lowerCAmelCase = imread(lowerCamelCase, 0) # Test for get_neighbors_pixel function() return not None __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = image[x_coordinate][y_coordinate] __lowerCAmelCase = lbp.get_neighbors_pixel( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __lowerCAmelCase = np.zeros((image.shape[0], image.shape[1])) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0, image.shape[0]): for j in range(0, image.shape[1]): __lowerCAmelCase = lbp.local_binary_value(lowerCamelCase, lowerCamelCase, lowerCamelCase) assert lbp_image.any()
9
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class a__ ( __A ): """simple docstring""" def __init__(self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=0 ): __lowerCAmelCase = 1.0 if scale is None else scale __lowerCAmelCase = 0.0 if loc is None else loc super().__init__(__lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__lowercase )] ) @property def _snake_case (self ): return self.base_dist.mean * self.scale + self.loc @property def _snake_case (self ): return self.base_dist.variance * self.scale**2 @property def _snake_case (self ): return self.variance.sqrt() class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase , __lowercase , **__lowercase ): super().__init__(**__lowercase ) __lowerCAmelCase = args_dim __lowerCAmelCase = nn.ModuleList([nn.Linear(__lowercase , __lowercase ) for dim in args_dim.values()] ) __lowerCAmelCase = domain_map def _snake_case (self , __lowercase ): __lowerCAmelCase = [proj(__lowercase ) for proj in self.proj] return self.domain_map(*__lowercase ) class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase ): super().__init__() __lowerCAmelCase = function def _snake_case (self , __lowercase , *__lowercase ): return self.function(__lowercase , *__lowercase ) class a__ : """simple docstring""" __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__(self , __lowercase = 1 ): __lowerCAmelCase = dim __lowerCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim} def _snake_case (self , __lowercase ): if self.dim == 1: return self.distribution_class(*__lowercase ) else: return Independent(self.distribution_class(*__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , ): __lowerCAmelCase = self._base_distribution(__lowercase ) if loc is None and scale is None: return distr else: return AffineTransformed(__lowercase , loc=__lowercase , scale=__lowercase , event_dim=self.event_dim ) @property def _snake_case (self ): return () if self.dim == 1 else (self.dim,) @property def _snake_case (self ): return len(self.event_shape ) @property def _snake_case (self ): return 0.0 def _snake_case (self , __lowercase ): return ParameterProjection( in_features=__lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _snake_case (self , *__lowercase ): raise NotImplementedError() @staticmethod def _snake_case (__lowercase ): return (x + torch.sqrt(torch.square(__lowercase ) + 4.0 )) / 2.0 class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def _snake_case (cls , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) __lowerCAmelCase = 2.0 + cls.squareplus(__lowercase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _snake_case (self , __lowercase ): __lowerCAmelCase , __lowerCAmelCase = distr_args if self.dim == 1: return self.distribution_class(total_count=__lowercase , logits=__lowercase ) else: return Independent(self.distribution_class(total_count=__lowercase , logits=__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None ): __lowerCAmelCase , __lowerCAmelCase = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
9
1
'''simple docstring''' def __magic_name__( ): return [ a * b * (1_0_0_0 - a - b) for a in range(1, 9_9_9) for b in range(lowerCamelCase, 9_9_9) if (a * a + b * b == (1_0_0_0 - a - b) ** 2) ][0] if __name__ == "__main__": print(f"""{solution() = }""")
9
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class a__ ( __A ): """simple docstring""" __UpperCamelCase : Tuple = 'naver-clova-ix/donut-base-finetuned-docvqa' __UpperCamelCase : List[str] = ( 'This is a tool that answers a question about an document (pdf). It takes an input named `document` which ' 'should be the document containing the information, as well as a `question` that is the question about the ' 'document. It returns a text that contains the answer to the question.' ) __UpperCamelCase : Optional[int] = 'document_qa' __UpperCamelCase : Optional[int] = AutoProcessor __UpperCamelCase : Tuple = VisionEncoderDecoderModel __UpperCamelCase : Any = ['image', 'text'] __UpperCamelCase : Optional[Any] = ['text'] def __init__(self , *__lowercase , **__lowercase ): if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*__lowercase , **__lowercase ) def _snake_case (self , __lowercase , __lowercase ): __lowerCAmelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' __lowerCAmelCase = task_prompt.replace('''{user_input}''' , __lowercase ) __lowerCAmelCase = self.pre_processor.tokenizer( __lowercase , add_special_tokens=__lowercase , return_tensors='''pt''' ).input_ids __lowerCAmelCase = self.pre_processor(__lowercase , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _snake_case (self , __lowercase ): return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__lowercase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__lowercase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__lowercase , ).sequences def _snake_case (self , __lowercase ): __lowerCAmelCase = self.pre_processor.batch_decode(__lowercase )[0] __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) __lowerCAmelCase = re.sub(R'''<.*?>''' , '''''' , __lowercase , count=1 ).strip() # remove first task start token __lowerCAmelCase = self.pre_processor.tokenajson(__lowercase ) return sequence["answer"]
9
1
'''simple docstring''' def __magic_name__( lowerCamelCase): if not isinstance(lowerCamelCase, lowerCamelCase): __lowerCAmelCase = F"""Input value of [number={number}] must be an integer""" raise TypeError(lowerCamelCase) if number < 0: return False __lowerCAmelCase = number * number while number > 0: if number % 1_0 != number_square % 1_0: return False number //= 1_0 number_square //= 1_0 return True if __name__ == "__main__": import doctest doctest.testmod()
9
'''simple docstring''' def __magic_name__( lowerCamelCase): __lowerCAmelCase = 1 __lowerCAmelCase = 2 while i * i <= n: __lowerCAmelCase = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def __magic_name__( ): __lowerCAmelCase = 1 __lowerCAmelCase = 1 while True: i += 1 t_num += i if count_divisors(lowerCamelCase) > 5_0_0: break return t_num if __name__ == "__main__": print(solution())
9
1
'''simple docstring''' from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ): __lowerCAmelCase , __lowerCAmelCase = coefficient_matrix.shape __lowerCAmelCase , __lowerCAmelCase = constant_matrix.shape if rowsa != colsa: __lowerCAmelCase = F"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}""" raise ValueError(lowerCamelCase) if colsa != 1: __lowerCAmelCase = F"""Constant matrix must be nx1 but received {rowsa}x{colsa}""" raise ValueError(lowerCamelCase) if rowsa != rowsa: __lowerCAmelCase = ( '''Coefficient and constant matrices dimensions must be nxn and nx1 but ''' F"""received {rowsa}x{colsa} and {rowsa}x{colsa}""" ) raise ValueError(lowerCamelCase) if len(lowerCamelCase) != rowsa: __lowerCAmelCase = ( '''Number of initial values must be equal to number of rows in coefficient ''' F"""matrix but received {len(lowerCamelCase)} and {rowsa}""" ) raise ValueError(lowerCamelCase) if iterations <= 0: raise ValueError('''Iterations must be at least 1''') __lowerCAmelCase = np.concatenate( (coefficient_matrix, constant_matrix), axis=1) __lowerCAmelCase , __lowerCAmelCase = table.shape strictly_diagonally_dominant(lowerCamelCase) # Iterates the whole matrix for given number of times for _ in range(lowerCamelCase): __lowerCAmelCase = [] for row in range(lowerCamelCase): __lowerCAmelCase = 0 for col in range(lowerCamelCase): if col == row: __lowerCAmelCase = table[row][col] elif col == cols - 1: __lowerCAmelCase = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] __lowerCAmelCase = (temp + val) / denom new_val.append(lowerCamelCase) __lowerCAmelCase = new_val return [float(lowerCamelCase) for i in new_val] def __magic_name__( lowerCamelCase): __lowerCAmelCase , __lowerCAmelCase = table.shape __lowerCAmelCase = True for i in range(0, lowerCamelCase): __lowerCAmelCase = 0 for j in range(0, cols - 1): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('''Coefficient matrix is not strictly diagonally dominant''') return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
9
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) self.assertTrue(isinstance(dc.token_ids , __lowercase ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _snake_case (self ): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __lowerCAmelCase = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__lowercase ): DisjunctiveConstraint(__lowercase ) # fails here def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 ) __lowerCAmelCase = stepped is True and completed is True and reset is False self.assertTrue(__lowercase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
9
1
'''simple docstring''' from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase=0.0 , __lowercase = None , __lowercase = "geglu" , __lowercase = None , __lowercase = False , __lowercase = False , __lowercase = False , __lowercase = False , __lowercase = True , __lowercase = "layer_norm" , __lowercase = False , ): super().__init__() __lowerCAmelCase = only_cross_attention __lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero''' __lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm''' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to""" F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: __lowerCAmelCase = AdaLayerNorm(__lowercase , __lowercase ) elif self.use_ada_layer_norm_zero: __lowerCAmelCase = AdaLayerNormZero(__lowercase , __lowercase ) else: __lowerCAmelCase = nn.LayerNorm(__lowercase , elementwise_affine=__lowercase ) __lowerCAmelCase = Attention( query_dim=__lowercase , heads=__lowercase , dim_head=__lowercase , dropout=__lowercase , bias=__lowercase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__lowercase , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. __lowerCAmelCase = ( AdaLayerNorm(__lowercase , __lowercase ) if self.use_ada_layer_norm else nn.LayerNorm(__lowercase , elementwise_affine=__lowercase ) ) __lowerCAmelCase = Attention( query_dim=__lowercase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__lowercase , dim_head=__lowercase , dropout=__lowercase , bias=__lowercase , upcast_attention=__lowercase , ) # is self-attn if encoder_hidden_states is none else: __lowerCAmelCase = None __lowerCAmelCase = None # 3. Feed-forward __lowerCAmelCase = nn.LayerNorm(__lowercase , elementwise_affine=__lowercase ) __lowerCAmelCase = FeedForward(__lowercase , dropout=__lowercase , activation_fn=__lowercase , final_dropout=__lowercase ) # let chunk size default to None __lowerCAmelCase = None __lowerCAmelCase = 0 def _snake_case (self , __lowercase , __lowercase ): # Sets chunk feed-forward __lowerCAmelCase = chunk_size __lowerCAmelCase = dim def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , ): # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: __lowerCAmelCase = self.norma(__lowercase , __lowercase ) elif self.use_ada_layer_norm_zero: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.norma( __lowercase , __lowercase , __lowercase , hidden_dtype=hidden_states.dtype ) else: __lowerCAmelCase = self.norma(__lowercase ) __lowerCAmelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {} __lowerCAmelCase = self.attna( __lowercase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__lowercase , **__lowercase , ) if self.use_ada_layer_norm_zero: __lowerCAmelCase = gate_msa.unsqueeze(1 ) * attn_output __lowerCAmelCase = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: __lowerCAmelCase = ( self.norma(__lowercase , __lowercase ) if self.use_ada_layer_norm else self.norma(__lowercase ) ) __lowerCAmelCase = self.attna( __lowercase , encoder_hidden_states=__lowercase , attention_mask=__lowercase , **__lowercase , ) __lowerCAmelCase = attn_output + hidden_states # 3. Feed-forward __lowerCAmelCase = self.norma(__lowercase ) if self.use_ada_layer_norm_zero: __lowerCAmelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" ) __lowerCAmelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size __lowerCAmelCase = torch.cat( [self.ff(__lowercase ) for hid_slice in norm_hidden_states.chunk(__lowercase , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: __lowerCAmelCase = self.ff(__lowercase ) if self.use_ada_layer_norm_zero: __lowerCAmelCase = gate_mlp.unsqueeze(1 ) * ff_output __lowerCAmelCase = ff_output + hidden_states return hidden_states class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase = None , __lowercase = 4 , __lowercase = 0.0 , __lowercase = "geglu" , __lowercase = False , ): super().__init__() __lowerCAmelCase = int(dim * mult ) __lowerCAmelCase = dim_out if dim_out is not None else dim if activation_fn == "gelu": __lowerCAmelCase = GELU(__lowercase , __lowercase ) if activation_fn == "gelu-approximate": __lowerCAmelCase = GELU(__lowercase , __lowercase , approximate='''tanh''' ) elif activation_fn == "geglu": __lowerCAmelCase = GEGLU(__lowercase , __lowercase ) elif activation_fn == "geglu-approximate": __lowerCAmelCase = ApproximateGELU(__lowercase , __lowercase ) __lowerCAmelCase = nn.ModuleList([] ) # project in self.net.append(__lowercase ) # project dropout self.net.append(nn.Dropout(__lowercase ) ) # project out self.net.append(nn.Linear(__lowercase , __lowercase ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(__lowercase ) ) def _snake_case (self , __lowercase ): for module in self.net: __lowerCAmelCase = module(__lowercase ) return hidden_states class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase , __lowercase = "none" ): super().__init__() __lowerCAmelCase = nn.Linear(__lowercase , __lowercase ) __lowerCAmelCase = approximate def _snake_case (self , __lowercase ): if gate.device.type != "mps": return F.gelu(__lowercase , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def _snake_case (self , __lowercase ): __lowerCAmelCase = self.proj(__lowercase ) __lowerCAmelCase = self.gelu(__lowercase ) return hidden_states class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase ): super().__init__() __lowerCAmelCase = nn.Linear(__lowercase , dim_out * 2 ) def _snake_case (self , __lowercase ): if gate.device.type != "mps": return F.gelu(__lowercase ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def _snake_case (self , __lowercase ): __lowerCAmelCase , __lowerCAmelCase = self.proj(__lowercase ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(__lowercase ) class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase ): super().__init__() __lowerCAmelCase = nn.Linear(__lowercase , __lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = self.proj(__lowercase ) return x * torch.sigmoid(1.7_0_2 * x ) class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase ): super().__init__() __lowerCAmelCase = nn.Embedding(__lowercase , __lowercase ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = nn.Linear(__lowercase , embedding_dim * 2 ) __lowerCAmelCase = nn.LayerNorm(__lowercase , elementwise_affine=__lowercase ) def _snake_case (self , __lowercase , __lowercase ): __lowerCAmelCase = self.linear(self.silu(self.emb(__lowercase ) ) ) __lowerCAmelCase , __lowerCAmelCase = torch.chunk(__lowercase , 2 ) __lowerCAmelCase = self.norm(__lowercase ) * (1 + scale) + shift return x class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase ): super().__init__() __lowerCAmelCase = CombinedTimestepLabelEmbeddings(__lowercase , __lowercase ) __lowerCAmelCase = nn.SiLU() __lowerCAmelCase = nn.Linear(__lowercase , 6 * embedding_dim , bias=__lowercase ) __lowerCAmelCase = nn.LayerNorm(__lowercase , elementwise_affine=__lowercase , eps=1e-6 ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase=None ): __lowerCAmelCase = self.linear(self.silu(self.emb(__lowercase , __lowercase , hidden_dtype=__lowercase ) ) ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = emb.chunk(6 , dim=1 ) __lowerCAmelCase = self.norm(__lowercase ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = 1e-5 ): super().__init__() __lowerCAmelCase = num_groups __lowerCAmelCase = eps if act_fn is None: __lowerCAmelCase = None else: __lowerCAmelCase = get_activation(__lowercase ) __lowerCAmelCase = nn.Linear(__lowercase , out_dim * 2 ) def _snake_case (self , __lowercase , __lowercase ): if self.act: __lowerCAmelCase = self.act(__lowercase ) __lowerCAmelCase = self.linear(__lowercase ) __lowerCAmelCase = emb[:, :, None, None] __lowerCAmelCase , __lowerCAmelCase = emb.chunk(2 , dim=1 ) __lowerCAmelCase = F.group_norm(__lowercase , self.num_groups , eps=self.eps ) __lowerCAmelCase = x * (1 + scale) + shift return x
9
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets _UpperCAmelCase : List[str] = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ _UpperCAmelCase : str = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric(\"mean_iou\") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])} """ _UpperCAmelCase : Tuple = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): if label_map is not None: for old_id, new_id in label_map.items(): __lowerCAmelCase = new_id # turn into Numpy arrays __lowerCAmelCase = np.array(lowerCamelCase) __lowerCAmelCase = np.array(lowerCamelCase) if reduce_labels: __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label - 1 __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label != ignore_index __lowerCAmelCase = np.not_equal(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = pred_label[mask] __lowerCAmelCase = np.array(lowerCamelCase)[mask] __lowerCAmelCase = pred_label[pred_label == label] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) for result, gt_seg_map in zip(lowerCamelCase, lowerCamelCase): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = total_intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) # compute metrics __lowerCAmelCase = {} __lowerCAmelCase = total_area_intersect.sum() / total_area_label.sum() __lowerCAmelCase = total_area_intersect / total_area_union __lowerCAmelCase = total_area_intersect / total_area_label __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = all_acc __lowerCAmelCase = iou __lowerCAmelCase = acc if nan_to_num is not None: __lowerCAmelCase = {metric: np.nan_to_num(lowerCamelCase, nan=lowerCamelCase) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): """simple docstring""" def _snake_case (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), } ) , reference_urls=[ '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py''' ] , ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ): __lowerCAmelCase = mean_iou( results=__lowercase , gt_seg_maps=__lowercase , num_labels=__lowercase , ignore_index=__lowercase , nan_to_num=__lowercase , label_map=__lowercase , reduce_labels=__lowercase , ) return iou_result
9
1
'''simple docstring''' import argparse import math import traceback import dateutil.parser as date_parser import requests def __magic_name__( lowerCamelCase): __lowerCAmelCase = {} __lowerCAmelCase = job['''started_at'''] __lowerCAmelCase = job['''completed_at'''] __lowerCAmelCase = date_parser.parse(lowerCamelCase) __lowerCAmelCase = date_parser.parse(lowerCamelCase) __lowerCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0) __lowerCAmelCase = start __lowerCAmelCase = end __lowerCAmelCase = duration_in_min return job_info def __magic_name__( lowerCamelCase, lowerCamelCase=None): __lowerCAmelCase = None if token is not None: __lowerCAmelCase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""} __lowerCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" __lowerCAmelCase = requests.get(lowerCamelCase, headers=lowerCamelCase).json() __lowerCAmelCase = {} try: job_time.update({job['''name''']: extract_time_from_single_job(lowerCamelCase) for job in result['''jobs''']}) __lowerCAmelCase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0) for i in range(lowerCamelCase): __lowerCAmelCase = requests.get(url + F"""&page={i + 2}""", headers=lowerCamelCase).json() job_time.update({job['''name''']: extract_time_from_single_job(lowerCamelCase) for job in result['''jobs''']}) return job_time except Exception: print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""") return {} if __name__ == "__main__": _UpperCAmelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") _UpperCAmelCase : Tuple = parser.parse_args() _UpperCAmelCase : int = get_job_time(args.workflow_run_id) _UpperCAmelCase : Union[str, Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(f"""{k}: {v['duration']}""")
9
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : str = DebertaTokenizer __UpperCamelCase : str = True __UpperCamelCase : Any = DebertaTokenizerFast def _snake_case (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCAmelCase = {'''unk_token''': '''[UNK]'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) def _snake_case (self , **__lowercase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = '''lower newer''' return input_text, output_text def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCAmelCase = tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) __lowerCAmelCase = tokens + [tokenizer.unk_token] __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = tokenizer('''Hello''' , '''World''' ) __lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __lowercase ) @slow def _snake_case (self ): __lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _snake_case (self ): __lowerCAmelCase = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] __lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase ) __lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']] # fmt: off __lowerCAmelCase = { '''input_ids''': [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __lowercase ) for expected, decoded in zip(__lowercase , __lowercase ): self.assertEqual(__lowercase , __lowercase )
9
1
'''simple docstring''' from collections import defaultdict from math import ceil, sqrt def __magic_name__( lowerCamelCase = 1_0_0_0_0_0_0, lowerCamelCase = 1_0): __lowerCAmelCase = defaultdict(lowerCamelCase) for outer_width in range(3, (t_limit // 4) + 2): if outer_width * outer_width > t_limit: __lowerCAmelCase = max( ceil(sqrt(outer_width * outer_width - t_limit)), 1) else: __lowerCAmelCase = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(lowerCamelCase, outer_width - 1, 2): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 1_0) if __name__ == "__main__": print(f"""{solution() = }""")
9
'''simple docstring''' import argparse import datetime def __magic_name__( lowerCamelCase): __lowerCAmelCase = { '''0''': '''Sunday''', '''1''': '''Monday''', '''2''': '''Tuesday''', '''3''': '''Wednesday''', '''4''': '''Thursday''', '''5''': '''Friday''', '''6''': '''Saturday''', } __lowerCAmelCase = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowerCamelCase) < 1_1: raise ValueError('''Must be 10 characters long''') # Get month __lowerCAmelCase = int(date_input[0] + date_input[1]) # Validate if not 0 < m < 1_3: raise ValueError('''Month must be between 1 - 12''') __lowerCAmelCase = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''') # Get day __lowerCAmelCase = int(date_input[3] + date_input[4]) # Validate if not 0 < d < 3_2: raise ValueError('''Date must be between 1 - 31''') # Get second separator __lowerCAmelCase = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''') # Get year __lowerCAmelCase = int(date_input[6] + date_input[7] + date_input[8] + date_input[9]) # Arbitrary year range if not 4_5 < y < 8_5_0_0: raise ValueError( '''Year out of range. There has to be some sort of limit...right?''') # Get datetime obj for validation __lowerCAmelCase = datetime.date(int(lowerCamelCase), int(lowerCamelCase), int(lowerCamelCase)) # Start math if m <= 2: __lowerCAmelCase = y - 1 __lowerCAmelCase = m + 1_2 # maths var __lowerCAmelCase = int(str(lowerCamelCase)[:2]) __lowerCAmelCase = int(str(lowerCamelCase)[2:]) __lowerCAmelCase = int(2.6 * m - 5.39) __lowerCAmelCase = int(c / 4) __lowerCAmelCase = int(k / 4) __lowerCAmelCase = int(d + k) __lowerCAmelCase = int(t + u + v + x) __lowerCAmelCase = int(z - (2 * c)) __lowerCAmelCase = round(w % 7) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('''The date was evaluated incorrectly. Contact developer.''') # Response __lowerCAmelCase = F"""Your date {date_input}, is a {days[str(lowerCamelCase)]}!""" return response if __name__ == "__main__": import doctest doctest.testmod() _UpperCAmelCase : List[str] = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) _UpperCAmelCase : Dict = parser.parse_args() zeller(args.date_input)
9
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class a__ ( metaclass=__A ): """simple docstring""" __UpperCamelCase : Any = ['flax', 'transformers'] def __init__(self , *__lowercase , **__lowercase ): requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''flax''', '''transformers'''] ) class a__ ( metaclass=__A ): """simple docstring""" __UpperCamelCase : List[Any] = ['flax', 'transformers'] def __init__(self , *__lowercase , **__lowercase ): requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''flax''', '''transformers'''] ) class a__ ( metaclass=__A ): """simple docstring""" __UpperCamelCase : int = ['flax', 'transformers'] def __init__(self , *__lowercase , **__lowercase ): requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''flax''', '''transformers'''] ) class a__ ( metaclass=__A ): """simple docstring""" __UpperCamelCase : Dict = ['flax', 'transformers'] def __init__(self , *__lowercase , **__lowercase ): requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''flax''', '''transformers'''] )
9
'''simple docstring''' import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = ConsistencyModelPipeline __UpperCamelCase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS __UpperCamelCase : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt __UpperCamelCase : List[Any] = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) @property def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , ) return unet @property def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , ) return unet def _snake_case (self , __lowercase=False ): if class_cond: __lowerCAmelCase = self.dummy_cond_unet else: __lowerCAmelCase = self.dummy_uncond_unet # Default to CM multistep sampler __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def _snake_case (self , __lowercase , __lowercase=0 ): if str(__lowercase ).startswith('''mps''' ): __lowerCAmelCase = torch.manual_seed(__lowercase ) else: __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowerCAmelCase = { '''batch_size''': 1, '''num_inference_steps''': None, '''timesteps''': [22, 0], '''generator''': generator, '''output_type''': '''np''', } return inputs def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase ) __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 0 __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase ) __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = 0 __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case (self , __lowercase=0 , __lowercase=False , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ): __lowerCAmelCase = torch.manual_seed(__lowercase ) __lowerCAmelCase = { '''num_inference_steps''': None, '''timesteps''': [22, 0], '''class_labels''': 0, '''generator''': generator, '''output_type''': '''np''', } if get_fixed_latents: __lowerCAmelCase = self.get_fixed_latents(seed=__lowercase , device=__lowercase , dtype=__lowercase , shape=__lowercase ) __lowerCAmelCase = latents return inputs def _snake_case (self , __lowercase=0 , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ): if type(__lowercase ) == str: __lowerCAmelCase = torch.device(__lowercase ) __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowerCAmelCase = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase ) return latents def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs() __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs() __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ): __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ): __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
9
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : str = logging.get_logger(__name__) _UpperCAmelCase : str = { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""", """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""", } class a__ ( __A ): """simple docstring""" __UpperCamelCase : str = 'roberta' def __init__(self , __lowercase=5_02_65 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ): super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = position_embedding_type __lowerCAmelCase = use_cache __lowerCAmelCase = classifier_dropout class a__ ( __A ): """simple docstring""" @property def _snake_case (self ): if self.task == "multiple-choice": __lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
9
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split _UpperCAmelCase : List[Any] = datasets.load_iris() _UpperCAmelCase : Dict = np.array(data["""data"""]) _UpperCAmelCase : int = np.array(data["""target"""]) _UpperCAmelCase : str = data["""target_names"""] _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = train_test_split(X, y) def __magic_name__( lowerCamelCase, lowerCamelCase): return np.linalg.norm(np.array(lowerCamelCase) - np.array(lowerCamelCase)) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=5): __lowerCAmelCase = zip(lowerCamelCase, lowerCamelCase) # List of distances of all points from the point to be classified __lowerCAmelCase = [] for data_point in data: __lowerCAmelCase = euclidean_distance(data_point[0], lowerCamelCase) distances.append((distance, data_point[1])) # Choosing 'k' points with the least distances. __lowerCAmelCase = [i[1] for i in sorted(lowerCamelCase)[:k]] # Most commonly occurring class among them # is the class into which the point is classified __lowerCAmelCase = Counter(lowerCamelCase).most_common(1)[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
9
1
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _UpperCAmelCase : List[str] = 1_6 _UpperCAmelCase : int = 3_2 def __magic_name__( lowerCamelCase, lowerCamelCase = 1_6): __lowerCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''') __lowerCAmelCase = load_dataset('''glue''', '''mrpc''') def tokenize_function(lowerCamelCase): # max_length=None => use the model max length (it's actually the default) __lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCAmelCase = datasets.map( lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''') def collate_fn(lowerCamelCase): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCAmelCase = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCAmelCase = 1_6 elif accelerator.mixed_precision != "no": __lowerCAmelCase = 8 else: __lowerCAmelCase = None return tokenizer.pad( lowerCamelCase, padding='''longest''', max_length=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_tensors='''pt''', ) # Instantiate dataloaders. __lowerCAmelCase = DataLoader( tokenized_datasets['''train'''], shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=lowerCamelCase) __lowerCAmelCase = DataLoader( tokenized_datasets['''validation'''], shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=lowerCamelCase) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _UpperCAmelCase : str = mocked_dataloaders # noqa: F811 def __magic_name__( lowerCamelCase, lowerCamelCase): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', lowerCamelCase) == "1": __lowerCAmelCase = 2 # Initialize accelerator __lowerCAmelCase = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCAmelCase = config['''lr'''] __lowerCAmelCase = int(config['''num_epochs''']) __lowerCAmelCase = int(config['''seed''']) __lowerCAmelCase = int(config['''batch_size''']) __lowerCAmelCase = evaluate.load('''glue''', '''mrpc''') # If the batch size is too big we use gradient accumulation __lowerCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __lowerCAmelCase = batch_size // MAX_GPU_BATCH_SIZE __lowerCAmelCase = MAX_GPU_BATCH_SIZE set_seed(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = get_dataloaders(lowerCamelCase, lowerCamelCase) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=lowerCamelCase) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCAmelCase = model.to(accelerator.device) # Instantiate optimizer __lowerCAmelCase = AdamW(params=model.parameters(), lr=lowerCamelCase) # Instantiate scheduler __lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=lowerCamelCase, num_warmup_steps=1_0_0, num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) # Now we train the model for epoch in range(lowerCamelCase): model.train() for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.loss __lowerCAmelCase = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() __lowerCAmelCase = 0 for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather((predictions, batch['''labels'''])) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(lowerCamelCase) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples __lowerCAmelCase = predictions[: len(eval_dataloader.dataset) - samples_seen] __lowerCAmelCase = references[: len(eval_dataloader.dataset) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=lowerCamelCase, references=lowerCamelCase, ) __lowerCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""", lowerCamelCase) def __magic_name__( ): __lowerCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''') parser.add_argument( '''--mixed_precision''', type=lowerCamelCase, default=lowerCamelCase, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''', ) parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''') __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(lowerCamelCase, lowerCamelCase) if __name__ == "__main__": main()
9
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase ) def _snake_case (self , **__lowercase ): return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowercase ) self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase ) __lowerCAmelCase = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase , return_tensors='''np''' ) __lowerCAmelCase = tokenizer(__lowercase , return_tensors='''np''' ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = ['''cat''', '''nasa badge'''] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = [['''cat''', '''nasa badge'''], ['''person''']] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 __lowerCAmelCase = len(__lowercase ) __lowerCAmelCase = max([len(__lowercase ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = ['''cat''', '''nasa badge'''] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 __lowerCAmelCase = inputs['''input_ids'''] __lowerCAmelCase = [ [4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(images=__lowercase , query_images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase )
9
1
'''simple docstring''' def __magic_name__( lowerCamelCase): __lowerCAmelCase = len(lowerCamelCase) for i in range(1, lowerCamelCase): __lowerCAmelCase = collection[i] __lowerCAmelCase = 0 __lowerCAmelCase = i - 1 while low <= high: __lowerCAmelCase = (low + high) // 2 if val < collection[mid]: __lowerCAmelCase = mid - 1 else: __lowerCAmelCase = mid + 1 for j in range(lowerCamelCase, lowerCamelCase, -1): __lowerCAmelCase = collection[j - 1] __lowerCAmelCase = val return collection if __name__ == "__main__": _UpperCAmelCase : Any = input("""Enter numbers separated by a comma:\n""").strip() _UpperCAmelCase : int = [int(item) for item in user_input.split(""",""")] print(binary_insertion_sort(unsorted))
9
'''simple docstring''' from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def __magic_name__( ): __lowerCAmelCase = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0)] __lowerCAmelCase = randint(-5_0_0_0, 5_0_0_0) return (arr, r) _UpperCAmelCase : Dict = make_dataset() def __magic_name__( lowerCamelCase, lowerCamelCase): for triplet in permutations(lowerCamelCase, 3): if sum(lowerCamelCase) == target: return tuple(sorted(lowerCamelCase)) return (0, 0, 0) def __magic_name__( lowerCamelCase, lowerCamelCase): arr.sort() __lowerCAmelCase = len(lowerCamelCase) for i in range(n - 1): __lowerCAmelCase , __lowerCAmelCase = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def __magic_name__( ): __lowerCAmelCase = ''' from __main__ import dataset, triplet_sum1, triplet_sum2 ''' __lowerCAmelCase = ''' triplet_sum1(*dataset) ''' __lowerCAmelCase = ''' triplet_sum2(*dataset) ''' __lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0) __lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0) return (min(lowerCamelCase), min(lowerCamelCase)) if __name__ == "__main__": from doctest import testmod testmod() _UpperCAmelCase : Union[str, Any] = solution_times() print(f"""The time for naive implementation is {times[0]}.""") print(f"""The time for optimized implementation is {times[1]}.""")
9
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Any = { """microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""", } class a__ ( __A , __A ): """simple docstring""" __UpperCamelCase : Dict = 'resnet' __UpperCamelCase : int = ['basic', 'bottleneck'] def __init__(self , __lowercase=3 , __lowercase=64 , __lowercase=[2_56, 5_12, 10_24, 20_48] , __lowercase=[3, 4, 6, 3] , __lowercase="bottleneck" , __lowercase="relu" , __lowercase=False , __lowercase=None , __lowercase=None , **__lowercase , ): super().__init__(**__lowercase ) if layer_type not in self.layer_types: raise ValueError(F"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" ) __lowerCAmelCase = num_channels __lowerCAmelCase = embedding_size __lowerCAmelCase = hidden_sizes __lowerCAmelCase = depths __lowerCAmelCase = layer_type __lowerCAmelCase = hidden_act __lowerCAmelCase = downsample_in_first_stage __lowerCAmelCase = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(__lowercase ) + 1 )] __lowerCAmelCase , __lowerCAmelCase = get_aligned_output_features_output_indices( out_features=__lowercase , out_indices=__lowercase , stage_names=self.stage_names ) class a__ ( __A ): """simple docstring""" __UpperCamelCase : List[str] = version.parse('1.11' ) @property def _snake_case (self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _snake_case (self ): return 1e-3
9
'''simple docstring''' import numpy as np def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = 1E-12, lowerCamelCase = 1_0_0, ): assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[1] # Ensure proper dimensionality. assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(lowerCamelCase) == np.iscomplexobj(lowerCamelCase) __lowerCAmelCase = np.iscomplexobj(lowerCamelCase) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(lowerCamelCase, input_matrix.conj().T) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. __lowerCAmelCase = False __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = 1E12 while not convergence: # Multiple matrix by the vector. __lowerCAmelCase = np.dot(lowerCamelCase, lowerCamelCase) # Normalize the resulting output vector. __lowerCAmelCase = w / np.linalg.norm(lowerCamelCase) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) __lowerCAmelCase = vector.conj().T if is_complex else vector.T __lowerCAmelCase = np.dot(lowerCamelCase, np.dot(lowerCamelCase, lowerCamelCase)) # Check convergence. __lowerCAmelCase = np.abs(lambda_ - lambda_previous) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: __lowerCAmelCase = True __lowerCAmelCase = lambda_ if is_complex: __lowerCAmelCase = np.real(lambda_) return lambda_, vector def __magic_name__( ): __lowerCAmelCase = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]]) __lowerCAmelCase = np.array([4_1, 4, 2_0]) __lowerCAmelCase = real_input_matrix.astype(np.complexaaa) __lowerCAmelCase = np.triu(1J * complex_input_matrix, 1) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T __lowerCAmelCase = np.array([4_1, 4, 2_0]).astype(np.complexaaa) for problem_type in ["real", "complex"]: if problem_type == "real": __lowerCAmelCase = real_input_matrix __lowerCAmelCase = real_vector elif problem_type == "complex": __lowerCAmelCase = complex_input_matrix __lowerCAmelCase = complex_vector # Our implementation. __lowerCAmelCase , __lowerCAmelCase = power_iteration(lowerCamelCase, lowerCamelCase) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). __lowerCAmelCase , __lowerCAmelCase = np.linalg.eigh(lowerCamelCase) # Last eigenvalue is the maximum one. __lowerCAmelCase = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. __lowerCAmelCase = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max) <= 1E-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(lowerCamelCase) - np.abs(lowerCamelCase)) <= 1E-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
9
1
'''simple docstring''' from __future__ import annotations from collections.abc import Iterable, Iterator from dataclasses import dataclass _UpperCAmelCase : Optional[Any] = (3, 9, -1_1, 0, 7, 5, 1, -1) _UpperCAmelCase : Any = (4, 6, 2, 0, 8, 1_0, 3, -2) @dataclass class a__ : """simple docstring""" __UpperCamelCase : int __UpperCamelCase : Node | None class a__ : """simple docstring""" def __init__(self , __lowercase ): __lowerCAmelCase = None for i in sorted(__lowercase , reverse=__lowercase ): __lowerCAmelCase = Node(__lowercase , self.head ) def __iter__(self ): __lowerCAmelCase = self.head while node: yield node.data __lowerCAmelCase = node.next_node def __len__(self ): return sum(1 for _ in self ) def __str__(self ): return " -> ".join([str(__lowercase ) for node in self] ) def __magic_name__( lowerCamelCase, lowerCamelCase): return SortedLinkedList(list(lowerCamelCase) + list(lowerCamelCase)) if __name__ == "__main__": import doctest doctest.testmod() _UpperCAmelCase : Dict = SortedLinkedList print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
9
'''simple docstring''' from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract _UpperCAmelCase : str = logging.get_logger(__name__) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): return [ int(1_0_0_0 * (box[0] / width)), int(1_0_0_0 * (box[1] / height)), int(1_0_0_0 * (box[2] / width)), int(1_0_0_0 * (box[3] / height)), ] def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = None): __lowerCAmelCase = tesseract_config if tesseract_config is not None else '''''' # apply OCR __lowerCAmelCase = to_pil_image(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = pil_image.size __lowerCAmelCase = pytesseract.image_to_data(lowerCamelCase, lang=lowerCamelCase, output_type='''dict''', config=lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates __lowerCAmelCase = [idx for idx, word in enumerate(lowerCamelCase) if not word.strip()] __lowerCAmelCase = [word for idx, word in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format __lowerCAmelCase = [] for x, y, w, h in zip(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [x, y, x + w, y + h] actual_boxes.append(lowerCamelCase) # finally, normalize the bounding boxes __lowerCAmelCase = [] for box in actual_boxes: normalized_boxes.append(normalize_box(lowerCamelCase, lowerCamelCase, lowerCamelCase)) assert len(lowerCamelCase) == len(lowerCamelCase), "Not as many words as there are bounding boxes" return words, normalized_boxes class a__ ( __A ): """simple docstring""" __UpperCamelCase : str = ['pixel_values'] def __init__(self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = None , __lowercase = "" , **__lowercase , ): super().__init__(**__lowercase ) __lowerCAmelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24} __lowerCAmelCase = get_size_dict(__lowercase ) __lowerCAmelCase = do_resize __lowerCAmelCase = size __lowerCAmelCase = resample __lowerCAmelCase = apply_ocr __lowerCAmelCase = ocr_lang __lowerCAmelCase = tesseract_config def _snake_case (self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ): __lowerCAmelCase = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) __lowerCAmelCase = (size['''height'''], size['''width''']) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ): __lowerCAmelCase = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase = size if size is not None else self.size __lowerCAmelCase = get_size_dict(__lowercase ) __lowerCAmelCase = resample if resample is not None else self.resample __lowerCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr __lowerCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang __lowerCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config __lowerCAmelCase = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) # All transformations expect numpy arrays. __lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images] if apply_ocr: requires_backends(self , '''pytesseract''' ) __lowerCAmelCase = [] __lowerCAmelCase = [] for image in images: __lowerCAmelCase , __lowerCAmelCase = apply_tesseract(__lowercase , __lowercase , __lowercase ) words_batch.append(__lowercase ) boxes_batch.append(__lowercase ) if do_resize: __lowerCAmelCase = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) __lowerCAmelCase = [flip_channel_order(__lowercase ) for image in images] __lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] __lowerCAmelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase ) if apply_ocr: __lowerCAmelCase = words_batch __lowerCAmelCase = boxes_batch return data
9
1
'''simple docstring''' import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = 10 def _snake_case (self ): __lowerCAmelCase = [1, 2, 3, 4] __lowerCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__lowercase , self.block_size , 0 ) , __lowercase ) def _snake_case (self ): __lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] __lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__lowercase , self.block_size , 0 ) , __lowercase ) def _snake_case (self ): __lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] __lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__lowercase , self.block_size , 0 ) , __lowercase ) def _snake_case (self ): __lowerCAmelCase = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' __lowerCAmelCase , __lowerCAmelCase = process_story(__lowercase ) self.assertEqual(__lowercase , [] ) def _snake_case (self ): __lowerCAmelCase = '''''' __lowerCAmelCase , __lowerCAmelCase = process_story(__lowercase ) self.assertEqual(__lowercase , [] ) self.assertEqual(__lowercase , [] ) def _snake_case (self ): __lowerCAmelCase = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) __lowerCAmelCase , __lowerCAmelCase = process_story(__lowercase ) __lowerCAmelCase = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(__lowercase , __lowercase ) __lowerCAmelCase = ['''It was the best of times.'''] self.assertEqual(__lowercase , __lowercase ) def _snake_case (self ): __lowerCAmelCase = torch.tensor([1, 2, 3, 4] ) __lowerCAmelCase = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__lowercase , 0 ).numpy() , expected.numpy() ) def _snake_case (self ): __lowerCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) __lowerCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__lowercase , 23 ).numpy() , expected.numpy() ) def _snake_case (self ): __lowerCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) __lowerCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__lowercase , 1 ).numpy() , expected.numpy() ) def _snake_case (self ): __lowerCAmelCase = 1_01 __lowerCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] ) __lowerCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) __lowerCAmelCase = compute_token_type_ids(__lowercase , __lowercase ) np.testing.assert_array_equal(__lowercase , __lowercase )
9
'''simple docstring''' from ..utils import DummyObject, requires_backends class a__ ( metaclass=__A ): """simple docstring""" __UpperCamelCase : int = ['torch', 'scipy'] def __init__(self , *__lowercase , **__lowercase ): requires_backends(self , ['''torch''', '''scipy'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''torch''', '''scipy'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''torch''', '''scipy'''] )
9
1
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __magic_name__( lowerCamelCase, lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True]) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = tmp_path / '''cache''' __lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader(lowerCamelCase, cache_dir=lowerCamelCase, keep_in_memory=lowerCamelCase).read() _check_json_dataset(lowerCamelCase, lowerCamelCase) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = tmp_path / '''cache''' __lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase, features=lowerCamelCase, cache_dir=lowerCamelCase).read() _check_json_dataset(lowerCamelCase, lowerCamelCase) @pytest.mark.parametrize( '''features''', [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ], ) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = tmp_path / '''cache''' __lowerCAmelCase = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader(lowerCamelCase, features=lowerCamelCase, cache_dir=lowerCamelCase).read() assert isinstance(lowerCamelCase, lowerCamelCase) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __magic_name__( lowerCamelCase, lowerCamelCase): # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} __lowerCAmelCase = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} __lowerCAmelCase = features.copy() __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None ) __lowerCAmelCase = tmp_path / '''cache''' __lowerCAmelCase = JsonDatasetReader(lowerCamelCase, features=lowerCamelCase, cache_dir=lowerCamelCase).read() assert isinstance(lowerCamelCase, lowerCamelCase) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test''']) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = tmp_path / '''cache''' __lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase, cache_dir=lowerCamelCase, split=lowerCamelCase).read() _check_json_dataset(lowerCamelCase, lowerCamelCase) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''', [str, list]) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): if issubclass(lowerCamelCase, lowerCamelCase): __lowerCAmelCase = jsonl_path elif issubclass(lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [jsonl_path] __lowerCAmelCase = tmp_path / '''cache''' __lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase, cache_dir=lowerCamelCase).read() _check_json_dataset(lowerCamelCase, lowerCamelCase) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=("train",)): assert isinstance(lowerCamelCase, lowerCamelCase) for split in splits: __lowerCAmelCase = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''', [False, True]) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = tmp_path / '''cache''' __lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __lowerCAmelCase = JsonDatasetReader({'''train''': jsonl_path}, cache_dir=lowerCamelCase, keep_in_memory=lowerCamelCase).read() _check_json_datasetdict(lowerCamelCase, lowerCamelCase) @pytest.mark.parametrize( '''features''', [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ], ) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = tmp_path / '''cache''' __lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __lowerCAmelCase = features.copy() if features else default_expected_features __lowerCAmelCase = ( Features({feature: Value(lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None ) __lowerCAmelCase = JsonDatasetReader({'''train''': jsonl_path}, features=lowerCamelCase, cache_dir=lowerCamelCase).read() _check_json_datasetdict(lowerCamelCase, lowerCamelCase) @pytest.mark.parametrize('''split''', [None, NamedSplit('''train'''), '''train''', '''test''']) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): if split: __lowerCAmelCase = {split: jsonl_path} else: __lowerCAmelCase = '''train''' __lowerCAmelCase = {'''train''': jsonl_path, '''test''': jsonl_path} __lowerCAmelCase = tmp_path / '''cache''' __lowerCAmelCase = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} __lowerCAmelCase = JsonDatasetReader(lowerCamelCase, cache_dir=lowerCamelCase).read() _check_json_datasetdict(lowerCamelCase, lowerCamelCase, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def __magic_name__( lowerCamelCase): return json.load(lowerCamelCase) def __magic_name__( lowerCamelCase): return [json.loads(lowerCamelCase) for line in buffer] class a__ : """simple docstring""" @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def _snake_case (self , __lowercase , __lowercase , __lowercase ): with io.BytesIO() as buffer: JsonDatasetWriter(__lowercase , __lowercase , lines=__lowercase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(__lowercase ) assert isinstance(__lowercase , __lowercase ) assert isinstance(exported_content[0] , __lowercase ) assert len(__lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): with io.BytesIO() as buffer: JsonDatasetWriter(__lowercase , __lowercase , lines=__lowercase , orient=__lowercase ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(__lowercase ) assert isinstance(__lowercase , __lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__lowercase ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def _snake_case (self , __lowercase , __lowercase , __lowercase ): with io.BytesIO() as buffer: JsonDatasetWriter(__lowercase , __lowercase , lines=__lowercase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json_function(__lowercase ) assert isinstance(__lowercase , __lowercase ) assert isinstance(exported_content[0] , __lowercase ) assert len(__lowercase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): with io.BytesIO() as buffer: JsonDatasetWriter(__lowercase , __lowercase , lines=__lowercase , orient=__lowercase , num_proc=2 ).write() buffer.seek(0 ) __lowerCAmelCase = load_json(__lowercase ) assert isinstance(__lowercase , __lowercase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__lowercase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__lowercase ) == 10 def _snake_case (self , __lowercase ): with pytest.raises(__lowercase ): with io.BytesIO() as buffer: JsonDatasetWriter(__lowercase , __lowercase , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / F"""test.json.{extension}""" __lowerCAmelCase = str(shared_datadir / F"""test_file.json.{extension}""" ) JsonDatasetWriter(__lowercase , __lowercase , compression=__lowercase ).write() with fsspec.open(__lowercase , '''rb''' , compression='''infer''' ) as f: __lowerCAmelCase = f.read() with fsspec.open(__lowercase , '''rb''' , compression='''infer''' ) as f: __lowerCAmelCase = f.read() assert exported_content == original_content
9
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class a__ ( unittest.TestCase ): """simple docstring""" def __init__(self , __lowercase , __lowercase = True , __lowercase = None , __lowercase = 32 , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = True , __lowercase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowercase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowercase = True , __lowercase=7 , __lowercase=30 , __lowercase=4_00 , __lowercase=3 , ): __lowerCAmelCase = parent __lowerCAmelCase = do_resize __lowerCAmelCase = size if size is not None else {'''shortest_edge''': 2_88} __lowerCAmelCase = size_divisor __lowerCAmelCase = do_rescale __lowerCAmelCase = rescale_factor __lowerCAmelCase = do_normalize __lowerCAmelCase = do_center_crop __lowerCAmelCase = image_mean __lowerCAmelCase = image_std __lowerCAmelCase = do_pad __lowerCAmelCase = batch_size __lowerCAmelCase = num_channels __lowerCAmelCase = min_resolution __lowerCAmelCase = max_resolution def _snake_case (self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _snake_case (self , __lowercase , __lowercase=False ): if not batched: __lowerCAmelCase = self.size['''shortest_edge'''] __lowerCAmelCase = image_inputs[0] if isinstance(__lowercase , Image.Image ): __lowerCAmelCase , __lowerCAmelCase = image.size else: __lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2] __lowerCAmelCase = size / min(__lowercase , __lowercase ) if h < w: __lowerCAmelCase , __lowerCAmelCase = size, scale * w else: __lowerCAmelCase , __lowerCAmelCase = scale * h, size __lowerCAmelCase = int((13_33 / 8_00) * size ) if max(__lowercase , __lowercase ) > max_size: __lowerCAmelCase = max_size / max(__lowercase , __lowercase ) __lowerCAmelCase = newh * scale __lowerCAmelCase = neww * scale __lowerCAmelCase , __lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 ) __lowerCAmelCase , __lowerCAmelCase = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __lowerCAmelCase = [] for image in image_inputs: __lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[0] )[0] __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Any = BridgeTowerImageProcessor if is_vision_available() else None def _snake_case (self ): __lowerCAmelCase = BridgeTowerImageProcessingTester(self ) @property def _snake_case (self ): return self.image_processor_tester.prepare_image_processor_dict() def _snake_case (self ): __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowercase , '''image_std''' ) ) self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowercase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowercase , '''size''' ) ) self.assertTrue(hasattr(__lowercase , '''size_divisor''' ) ) def _snake_case (self ): pass def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , np.ndarray ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , torch.Tensor ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
9
1
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _UpperCAmelCase : Dict = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Any = [ """MRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MraForMaskedLM""", """MraForMultipleChoice""", """MraForQuestionAnswering""", """MraForSequenceClassification""", """MraForTokenClassification""", """MraLayer""", """MraModel""", """MraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys _UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
9
'''simple docstring''' # Imports import numpy as np class a__ : """simple docstring""" def __init__(self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) def _snake_case (self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): if red is not None: __lowerCAmelCase = red if green is not None: __lowerCAmelCase = green if blue is not None: __lowerCAmelCase = blue if red_edge is not None: __lowerCAmelCase = red_edge if nir is not None: __lowerCAmelCase = nir return True def _snake_case (self , __lowercase="" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) __lowerCAmelCase = { '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: return funcs[index]() except KeyError: print('''Index not in the list!''' ) return False def _snake_case (self ): return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def _snake_case (self ): return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _snake_case (self ): return self.nir * (self.red / (self.green**2)) def _snake_case (self ): return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _snake_case (self ): return (self.nir - self.red) / (self.nir + self.red) def _snake_case (self ): return (self.nir - self.blue) / (self.nir + self.blue) def _snake_case (self ): return (self.redEdge - self.red) / (self.redEdge + self.red) def _snake_case (self ): return (self.nir - self.green) / (self.nir + self.green) def _snake_case (self ): return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _snake_case (self ): return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _snake_case (self ): return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _snake_case (self ): return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _snake_case (self , __lowercase=0.0_8 , __lowercase=1.2_2 , __lowercase=0.0_3 ): return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _snake_case (self ): return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _snake_case (self ): return (self.nir / self.green) - 1 def _snake_case (self ): return (self.nir / self.redEdge) - 1 def _snake_case (self ): return (self.red - self.blue) / self.red def _snake_case (self ): __lowerCAmelCase = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _snake_case (self ): return self.nir - self.green def _snake_case (self ): return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _snake_case (self ): __lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def _snake_case (self , __lowercase=0.1_6 ): return (self.nir - self.green) / (self.nir + self.green + y) def _snake_case (self , __lowercase=0.5 ): return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _snake_case (self ): return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def _snake_case (self , __lowercase=None , __lowercase=None ): return (self.nir - b) / (a * self.red) def _snake_case (self ): return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _snake_case (self ): return (self.red + self.green + self.blue) / 3_0.5 def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.rvi() - 1) / (self.rvi() + 1) def _snake_case (self ): return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _snake_case (self ): return self.green / (self.nir + self.red + self.green) def _snake_case (self ): return self.nir / (self.nir + self.red + self.green) def _snake_case (self ): return self.red / (self.nir + self.red + self.green) def _snake_case (self ): return (self.green - self.red) / (self.green + self.red) def _snake_case (self ): return (self.red - self.green) / (self.red + self.green) def _snake_case (self ): __lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) __lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _snake_case (self ): return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.ndvi() + 0.5) ** (1 / 2) def _snake_case (self ): return (self.nir - self.redEdge) / (self.nir + self.redEdge)
9
1
'''simple docstring''' from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __magic_name__( lowerCamelCase): return getitem, k def __magic_name__( lowerCamelCase, lowerCamelCase): return setitem, k, v def __magic_name__( lowerCamelCase): return delitem, k def __magic_name__( lowerCamelCase, lowerCamelCase, *lowerCamelCase): try: return fun(lowerCamelCase, *lowerCamelCase), None except Exception as e: return None, e _UpperCAmelCase : Dict = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) _UpperCAmelCase : List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] _UpperCAmelCase : Union[str, Any] = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] _UpperCAmelCase : Tuple = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] _UpperCAmelCase : Any = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] _UpperCAmelCase : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( '''operations''', ( pytest.param(_add_items, id='''add items'''), pytest.param(_overwrite_items, id='''overwrite items'''), pytest.param(_delete_items, id='''delete items'''), pytest.param(_access_absent_items, id='''access absent items'''), pytest.param(_add_with_resize_up, id='''add with resize up'''), pytest.param(_add_with_resize_down, id='''add with resize down'''), ), ) def __magic_name__( lowerCamelCase): __lowerCAmelCase = HashMap(initial_block_size=4) __lowerCAmelCase = {} for _, (fun, *args) in enumerate(lowerCamelCase): __lowerCAmelCase , __lowerCAmelCase = _run_operation(lowerCamelCase, lowerCamelCase, *lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = _run_operation(lowerCamelCase, lowerCamelCase, *lowerCamelCase) assert my_res == py_res assert str(lowerCamelCase) == str(lowerCamelCase) assert set(lowerCamelCase) == set(lowerCamelCase) assert len(lowerCamelCase) == len(lowerCamelCase) assert set(my.items()) == set(py.items()) def __magic_name__( ): def is_public(lowerCamelCase) -> bool: return not name.startswith('''_''') __lowerCAmelCase = {name for name in dir({}) if is_public(lowerCamelCase)} __lowerCAmelCase = {name for name in dir(HashMap()) if is_public(lowerCamelCase)} assert dict_public_names > hash_public_names
9
'''simple docstring''' from math import sqrt def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' must been an int and positive" __lowerCAmelCase = True # 0 and 1 are none primes. if number <= 1: __lowerCAmelCase = False for divisor in range(2, int(round(sqrt(lowerCamelCase))) + 1): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __lowerCAmelCase = False break # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'status' must been from type bool" return status def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __lowerCAmelCase = list(range(2, n + 1)) __lowerCAmelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(lowerCamelCase)): for j in range(i + 1, len(lowerCamelCase)): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __lowerCAmelCase = 0 # filters actual prime numbers. __lowerCAmelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n > 2), "'N' must been an int and > 2" __lowerCAmelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1): if is_prime(lowerCamelCase): ans.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and number >= 0, "'number' must been an int and >= 0" __lowerCAmelCase = [] # this list will be returns of the function. # potential prime number factors. __lowerCAmelCase = 2 __lowerCAmelCase = number if number == 0 or number == 1: ans.append(lowerCamelCase) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(lowerCamelCase): while quotient != 1: if is_prime(lowerCamelCase) and (quotient % factor == 0): ans.append(lowerCamelCase) quotient /= factor else: factor += 1 else: ans.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type list" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = max(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number >= 0 ), "'number' bust been an int and >= 0" __lowerCAmelCase = 0 # prime factorization of 'number' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = min(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase), "'ans' must been from type int" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int" assert isinstance(number % 2 == 0, lowerCamelCase), "compare bust been from type bool" return number % 2 == 0 def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase), "'number' must been an int" assert isinstance(number % 2 != 0, lowerCamelCase), "compare bust been from type bool" return number % 2 != 0 def __magic_name__( lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and (number > 2) and is_even(lowerCamelCase) ), "'number' must been an int, even and > 2" __lowerCAmelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __lowerCAmelCase = get_prime_numbers(lowerCamelCase) __lowerCAmelCase = len(lowerCamelCase) # run variable for while-loops. __lowerCAmelCase = 0 __lowerCAmelCase = None # exit variable. for break up the loops __lowerCAmelCase = True while i < len_pn and loop: __lowerCAmelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __lowerCAmelCase = False ans.append(prime_numbers[i]) ans.append(prime_numbers[j]) j += 1 i += 1 # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (len(lowerCamelCase) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0]) and is_prime(ans[1]) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." __lowerCAmelCase = 0 while numbera != 0: __lowerCAmelCase = numbera % numbera __lowerCAmelCase = numbera __lowerCAmelCase = rest # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." __lowerCAmelCase = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __lowerCAmelCase = prime_factorization(lowerCamelCase) __lowerCAmelCase = prime_factorization(lowerCamelCase) elif numbera == 1 or numbera == 1: __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = max(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(max(lowerCamelCase, lowerCamelCase)): ans *= n else: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(lowerCamelCase): ans *= n done.append(lowerCamelCase) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __lowerCAmelCase = prime_fac_a.count(lowerCamelCase) for _ in range(lowerCamelCase): ans *= n done.append(lowerCamelCase) # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'number' must been a positive int" __lowerCAmelCase = 0 __lowerCAmelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(lowerCamelCase): ans += 1 # precondition assert isinstance(lowerCamelCase, lowerCamelCase) and is_prime( lowerCamelCase), "'ans' must been a prime number and from type int" return ans def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( is_prime(lowerCamelCase) and is_prime(lowerCamelCase) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __lowerCAmelCase = p_number_a + 1 # jump to the next number __lowerCAmelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(lowerCamelCase): number += 1 while number < p_number_a: ans.append(lowerCamelCase) number += 1 # fetch the next prime number. while not is_prime(lowerCamelCase): number += 1 # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and ans[0] != p_number_a and ans[len(lowerCamelCase) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 1), "'n' must been int and >= 1" __lowerCAmelCase = [] # will be returned. for divisor in range(1, n + 1): if n % divisor == 0: ans.append(lowerCamelCase) # precondition assert ans[0] == 1 and ans[len(lowerCamelCase) - 1] == n, "Error in function getDivisiors(...)" return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and ( number > 1 ), "'number' must been an int and >= 1" __lowerCAmelCase = get_divisors(lowerCamelCase) # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (divisors[0] == 1) and (divisors[len(lowerCamelCase) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1]) == number def __magic_name__( lowerCamelCase, lowerCamelCase): assert ( isinstance(lowerCamelCase, lowerCamelCase) and isinstance(lowerCamelCase, lowerCamelCase) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __lowerCAmelCase = gcd(abs(lowerCamelCase), abs(lowerCamelCase)) # precondition assert ( isinstance(lowerCamelCase, lowerCamelCase) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been a int and >= 0" __lowerCAmelCase = 1 # this will be return. for factor in range(1, n + 1): ans *= factor return ans def __magic_name__( lowerCamelCase): assert isinstance(lowerCamelCase, lowerCamelCase) and (n >= 0), "'n' must been an int and >= 0" __lowerCAmelCase = 0 __lowerCAmelCase = 1 __lowerCAmelCase = 1 # this will be return for _ in range(n - 1): __lowerCAmelCase = ans ans += fiba __lowerCAmelCase = tmp return ans
9
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCAmelCase : Union[str, Any] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Any = [ """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMSNModel""", """ViTMSNForImageClassification""", """ViTMSNPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys _UpperCAmelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
9
'''simple docstring''' import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed _UpperCAmelCase : Dict = """true""" def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=1_6): set_seed(4_2) __lowerCAmelCase = RegressionModel() __lowerCAmelCase = deepcopy(lowerCamelCase) __lowerCAmelCase = RegressionDataset(length=lowerCamelCase) __lowerCAmelCase = DataLoader(lowerCamelCase, batch_size=lowerCamelCase) model.to(accelerator.device) __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase) return model, ddp_model, dataloader def __magic_name__( lowerCamelCase, lowerCamelCase=False): __lowerCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''') __lowerCAmelCase = load_dataset('''glue''', '''mrpc''', split='''validation''') def tokenize_function(lowerCamelCase): __lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase) return outputs with accelerator.main_process_first(): __lowerCAmelCase = dataset.map( lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) __lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''') def collate_fn(lowerCamelCase): if use_longest: return tokenizer.pad(lowerCamelCase, padding='''longest''', return_tensors='''pt''') return tokenizer.pad(lowerCamelCase, padding='''max_length''', max_length=1_2_8, return_tensors='''pt''') return DataLoader(lowerCamelCase, shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=1_6) def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = Accelerator(dispatch_batches=lowerCamelCase, split_batches=lowerCamelCase) __lowerCAmelCase = get_dataloader(lowerCamelCase, not dispatch_batches) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare(lowerCamelCase, lowerCamelCase) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [] for batch in dataloader: __lowerCAmelCase , __lowerCAmelCase = batch.values() with torch.no_grad(): __lowerCAmelCase = model(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((logit, target)) logits_and_targets.append((logit, target)) __lowerCAmelCase , __lowerCAmelCase = [], [] for logit, targ in logits_and_targets: logits.append(lowerCamelCase) targs.append(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = torch.cat(lowerCamelCase), torch.cat(lowerCamelCase) return logits, targs def __magic_name__( lowerCamelCase, lowerCamelCase=8_2, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=1_6): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = get_basic_setup(lowerCamelCase, lowerCamelCase, lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = generate_predictions(lowerCamelCase, lowerCamelCase, lowerCamelCase) assert ( len(lowerCamelCase) == num_samples ), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCamelCase)}""" def __magic_name__( lowerCamelCase = False, lowerCamelCase = False): __lowerCAmelCase = evaluate.load('''glue''', '''mrpc''') __lowerCAmelCase , __lowerCAmelCase = get_mrpc_setup(lowerCamelCase, lowerCamelCase) # First do baseline __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''no'''] model.to(lowerCamelCase) model.eval() for batch in dataloader: batch.to(lowerCamelCase) with torch.inference_mode(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) metric.add_batch(predictions=lowerCamelCase, references=batch['''labels''']) __lowerCAmelCase = metric.compute() # Then do distributed __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) __lowerCAmelCase = batch['''labels'''] __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((preds, references)) metric.add_batch(predictions=lowerCamelCase, references=lowerCamelCase) __lowerCAmelCase = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n""" def __magic_name__( ): __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''') for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""") test_mrpc(lowerCamelCase, lowerCamelCase) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''') for split_batches in [True, False]: for dispatch_batches in [True, False]: __lowerCAmelCase = Accelerator(split_batches=lowerCamelCase, dispatch_batches=lowerCamelCase) if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""") test_torch_metrics(lowerCamelCase, 9_9) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''') __lowerCAmelCase = Accelerator() test_torch_metrics(lowerCamelCase, 5_1_2) accelerator.state._reset_state() def __magic_name__( lowerCamelCase): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
9
1
'''simple docstring''' import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Dict = TransfoXLTokenizer __UpperCamelCase : Tuple = False __UpperCamelCase : Optional[Any] = False def _snake_case (self ): super().setUp() __lowerCAmelCase = [ '''<unk>''', '''[CLS]''', '''[SEP]''', '''want''', '''unwanted''', '''wa''', '''un''', '''running''', ''',''', '''low''', '''l''', ] __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def _snake_case (self , **__lowercase ): __lowerCAmelCase = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = '''<unk> UNwanted , running''' __lowerCAmelCase = '''<unk> unwanted, running''' return input_text, output_text def _snake_case (self ): __lowerCAmelCase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__lowercase ) __lowerCAmelCase = tokenizer.tokenize('''<unk> UNwanted , running''' ) self.assertListEqual(__lowercase , ['''<unk>''', '''unwanted''', ''',''', '''running'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , [0, 4, 8, 7] ) def _snake_case (self ): __lowerCAmelCase = TransfoXLTokenizer(lower_case=__lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) def _snake_case (self ): __lowerCAmelCase = TransfoXLTokenizer(lower_case=__lowercase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def _snake_case (self ): __lowerCAmelCase = TransfoXLTokenizer(lower_case=__lowercase ) __lowerCAmelCase = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?''' __lowerCAmelCase = [ '''Hello''', '''(''', '''bracket''', ''')''', '''and''', '''side''', '''@-@''', '''scrolled''', '''[''', '''and''', ''']''', '''Henry''', '''\'s''', '''$''', '''5''', '''@,@''', '''000''', '''with''', '''3''', '''@.@''', '''34''', '''m''', '''.''', '''What''', '''\'s''', '''up''', '''!''', '''?''', ] self.assertListEqual(tokenizer.tokenize(__lowercase ) , __lowercase ) self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ) , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = len(__lowercase ) tokenizer.add_tokens(['''new1''', '''new2'''] ) tokenizer.move_added_token('''new1''' , 1 ) # Check that moved token is not copied (duplicate) self.assertEqual(len(__lowercase ) , original_len + 2 ) # Check that token is moved to specified id self.assertEqual(tokenizer.encode('''new1''' ) , [1] ) self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
9
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase : str = logging.get_logger(__name__) _UpperCAmelCase : str = { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""", """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""", } class a__ ( __A ): """simple docstring""" __UpperCamelCase : str = 'roberta' def __init__(self , __lowercase=5_02_65 , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_12 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=1 , __lowercase=0 , __lowercase=2 , __lowercase="absolute" , __lowercase=True , __lowercase=None , **__lowercase , ): super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __lowerCAmelCase = vocab_size __lowerCAmelCase = hidden_size __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_act __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = max_position_embeddings __lowerCAmelCase = type_vocab_size __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = position_embedding_type __lowerCAmelCase = use_cache __lowerCAmelCase = classifier_dropout class a__ ( __A ): """simple docstring""" @property def _snake_case (self ): if self.task == "multiple-choice": __lowerCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowerCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
9
1
'''simple docstring''' from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 _UpperCAmelCase : Optional[Any] = { # 1536-bit 5: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, # 2048-bit 1_4: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + """DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AACAA68FFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, # 3072-bit 1_5: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + """DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64""" + """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7""" + """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B""" + """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31""" + """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, # 4096-bit 1_6: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + """DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64""" + """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7""" + """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B""" + """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31""" + """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7""" + """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA""" + """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6""" + """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED""" + """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9""" + """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199""" + """FFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, # 6144-bit 1_7: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08""" + """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B""" + """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9""" + """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6""" + """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8""" + """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C""" + """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718""" + """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D""" + """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D""" + """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226""" + """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC""" + """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26""" + """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB""" + """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2""" + """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127""" + """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492""" + """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406""" + """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918""" + """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151""" + """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03""" + """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F""" + """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA""" + """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B""" + """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632""" + """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E""" + """6DCC4024FFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, # 8192-bit 1_8: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + """DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64""" + """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7""" + """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B""" + """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31""" + """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7""" + """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA""" + """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6""" + """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED""" + """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9""" + """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492""" + """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD""" + """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831""" + """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B""" + """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF""" + """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6""" + """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3""" + """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA""" + """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328""" + """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C""" + """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE""" + """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4""" + """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300""" + """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568""" + """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9""" + """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B""" + """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A""" + """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36""" + """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1""" + """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92""" + """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47""" + """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71""" + """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, } class a__ : """simple docstring""" def __init__(self , __lowercase = 14 ): if group not in primes: raise ValueError('''Unsupported Group''' ) __lowerCAmelCase = primes[group]['''prime'''] __lowerCAmelCase = primes[group]['''generator'''] __lowerCAmelCase = int(hexlify(urandom(32 ) ) , base=16 ) def _snake_case (self ): return hex(self.__private_key )[2:] def _snake_case (self ): __lowerCAmelCase = pow(self.generator , self.__private_key , self.prime ) return hex(__lowercase )[2:] def _snake_case (self , __lowercase ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(__lowercase , (self.prime - 1) // 2 , self.prime ) == 1 ) def _snake_case (self , __lowercase ): __lowerCAmelCase = int(__lowercase , base=16 ) if not self.is_valid_public_key(__lowercase ): raise ValueError('''Invalid public key''' ) __lowerCAmelCase = pow(__lowercase , self.__private_key , self.prime ) return shaaaa(str(__lowercase ).encode() ).hexdigest() @staticmethod def _snake_case (__lowercase , __lowercase ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(__lowercase , (prime - 1) // 2 , __lowercase ) == 1 ) @staticmethod def _snake_case (__lowercase , __lowercase , __lowercase = 14 ): __lowerCAmelCase = int(__lowercase , base=16 ) __lowerCAmelCase = int(__lowercase , base=16 ) __lowerCAmelCase = primes[group]['''prime'''] if not DiffieHellman.is_valid_public_key_static(__lowercase , __lowercase ): raise ValueError('''Invalid public key''' ) __lowerCAmelCase = pow(__lowercase , __lowercase , __lowercase ) return shaaaa(str(__lowercase ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
9
'''simple docstring''' import argparse import re from pathlib import Path import requests import torch from PIL import Image from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor from transformers import ( EfficientFormerConfig, EfficientFormerForImageClassificationWithTeacher, EfficientFormerImageProcessor, ) from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = old_name if "patch_embed" in old_name: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = old_name.split('''.''') if layer == "0": __lowerCAmelCase = old_name.replace('''0''', '''convolution1''') elif layer == "1": __lowerCAmelCase = old_name.replace('''1''', '''batchnorm_before''') elif layer == "3": __lowerCAmelCase = old_name.replace('''3''', '''convolution2''') else: __lowerCAmelCase = old_name.replace('''4''', '''batchnorm_after''') if "network" in old_name and re.search(r'''\d\.\d''', lowerCamelCase): __lowerCAmelCase = r'''\b\d{2}\b''' if bool(re.search(lowerCamelCase, lowerCamelCase)): __lowerCAmelCase = re.search(r'''\d\.\d\d.''', lowerCamelCase).group() else: __lowerCAmelCase = re.search(r'''\d\.\d.''', lowerCamelCase).group() if int(match[0]) < 6: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') __lowerCAmelCase = trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1]) __lowerCAmelCase = '''intermediate_stages.''' + trimmed_name else: __lowerCAmelCase = old_name.replace(lowerCamelCase, '''''') if int(match[2]) < num_meta4D_last_stage: __lowerCAmelCase = trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2]) else: __lowerCAmelCase = str(int(match[2]) - num_meta4D_last_stage) __lowerCAmelCase = trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index) if "norm1" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm1''', '''layernorm1''') elif "norm2" in old_name: __lowerCAmelCase = trimmed_name.replace('''norm2''', '''layernorm2''') elif "fc1" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc1''', '''linear_in''') elif "fc2" in old_name: __lowerCAmelCase = trimmed_name.replace('''fc2''', '''linear_out''') __lowerCAmelCase = '''last_stage.''' + trimmed_name elif "network" in old_name and re.search(r'''.\d.''', lowerCamelCase): __lowerCAmelCase = old_name.replace('''network''', '''intermediate_stages''') if "fc" in new_name: __lowerCAmelCase = new_name.replace('''fc''', '''convolution''') elif ("norm1" in new_name) and ("layernorm1" not in new_name): __lowerCAmelCase = new_name.replace('''norm1''', '''batchnorm_before''') elif ("norm2" in new_name) and ("layernorm2" not in new_name): __lowerCAmelCase = new_name.replace('''norm2''', '''batchnorm_after''') if "proj" in new_name: __lowerCAmelCase = new_name.replace('''proj''', '''projection''') if "dist_head" in new_name: __lowerCAmelCase = new_name.replace('''dist_head''', '''distillation_classifier''') elif "head" in new_name: __lowerCAmelCase = new_name.replace('''head''', '''classifier''') elif "patch_embed" in new_name: __lowerCAmelCase = '''efficientformer.''' + new_name elif new_name == "norm.weight" or new_name == "norm.bias": __lowerCAmelCase = new_name.replace('''norm''', '''layernorm''') __lowerCAmelCase = '''efficientformer.''' + new_name else: __lowerCAmelCase = '''efficientformer.encoder.''' + new_name return new_name def __magic_name__( lowerCamelCase, lowerCamelCase): for key in checkpoint.copy().keys(): __lowerCAmelCase = checkpoint.pop(lowerCamelCase) __lowerCAmelCase = val return checkpoint def __magic_name__( ): __lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw) return image def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model'''] __lowerCAmelCase = EfficientFormerConfig.from_json_file(lowerCamelCase) __lowerCAmelCase = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase) __lowerCAmelCase = '''_'''.join(checkpoint_path.split('''/''')[-1].split('''.''')[0].split('''_''')[:-1]) __lowerCAmelCase = config.depths[-1] - config.num_metaad_blocks + 1 __lowerCAmelCase = convert_torch_checkpoint(lowerCamelCase, lowerCamelCase) model.load_state_dict(lowerCamelCase) model.eval() __lowerCAmelCase = { '''bilinear''': PILImageResampling.BILINEAR, '''bicubic''': PILImageResampling.BICUBIC, '''nearest''': PILImageResampling.NEAREST, } # prepare image __lowerCAmelCase = prepare_img() __lowerCAmelCase = 2_5_6 __lowerCAmelCase = 2_2_4 __lowerCAmelCase = EfficientFormerImageProcessor( size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], ) __lowerCAmelCase = processor(images=lowerCamelCase, return_tensors='''pt''').pixel_values # original processing pipeline __lowerCAmelCase = Compose( [ Resize(lowerCamelCase, interpolation=pillow_resamplings['''bicubic''']), CenterCrop(lowerCamelCase), ToTensor(), Normalize(lowerCamelCase, lowerCamelCase), ]) __lowerCAmelCase = image_transforms(lowerCamelCase).unsqueeze(0) assert torch.allclose(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = model(lowerCamelCase) __lowerCAmelCase = outputs.logits __lowerCAmelCase = (1, 1_0_0_0) if "l1" in model_name: __lowerCAmelCase = torch.Tensor( [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l3" in model_name: __lowerCAmelCase = torch.Tensor( [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27]) assert torch.allclose(logits[0, :1_0], lowerCamelCase, atol=1E-3) assert logits.shape == expected_shape elif "l7" in model_name: __lowerCAmelCase = torch.Tensor( [-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78]) assert logits.shape == expected_shape else: raise ValueError( F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""") # Save Checkpoints Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) model.save_pretrained(lowerCamelCase) print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""") processor.save_pretrained(lowerCamelCase) print(F"""Processor successfuly saved at {pytorch_dump_path}""") if push_to_hub: print('''Pushing model to the hub...''') model.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add model''', use_temp_dir=lowerCamelCase, ) processor.push_to_hub( repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='''Add image processor''', use_temp_dir=lowerCamelCase, ) if __name__ == "__main__": _UpperCAmelCase : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to EfficientFormer pytorch checkpoint.""", ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for EfficientFormer model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) parser.set_defaults(push_to_hub=True) _UpperCAmelCase : List[str] = parser.parse_args() convert_efficientformer_checkpoint( checkpoint_path=args.pytorch_model_path, efficientformer_config_file=args.config_file, pytorch_dump_path=args.pytorch_dump_path, push_to_hub=args.push_to_hub, )
9
1
'''simple docstring''' def __magic_name__( lowerCamelCase): __lowerCAmelCase = 1 __lowerCAmelCase = 2 while i * i <= n: __lowerCAmelCase = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def __magic_name__( ): __lowerCAmelCase = 1 __lowerCAmelCase = 1 while True: i += 1 t_num += i if count_divisors(lowerCamelCase) > 5_0_0: break return t_num if __name__ == "__main__": print(solution())
9
'''simple docstring''' from __future__ import annotations import math def __magic_name__( lowerCamelCase, lowerCamelCase): if len(lowerCamelCase) != 2 or len(a[0]) != 2 or len(lowerCamelCase) != 2 or len(b[0]) != 2: raise Exception('''Matrices are not 2x2''') __lowerCAmelCase = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def __magic_name__( lowerCamelCase, lowerCamelCase): return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))] for row in range(len(lowerCamelCase)) ] def __magic_name__( lowerCamelCase, lowerCamelCase): return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))] for row in range(len(lowerCamelCase)) ] def __magic_name__( lowerCamelCase): if len(lowerCamelCase) % 2 != 0 or len(a[0]) % 2 != 0: raise Exception('''Odd matrices are not supported!''') __lowerCAmelCase = len(lowerCamelCase) __lowerCAmelCase = matrix_length // 2 __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase)] __lowerCAmelCase = [ [a[i][j] for j in range(lowerCamelCase, lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase) ] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase)] __lowerCAmelCase = [[a[i][j] for j in range(lowerCamelCase)] for i in range(lowerCamelCase, lowerCamelCase)] return top_left, top_right, bot_left, bot_right def __magic_name__( lowerCamelCase): return len(lowerCamelCase), len(matrix[0]) def __magic_name__( lowerCamelCase): print('''\n'''.join(str(lowerCamelCase) for line in matrix)) def __magic_name__( lowerCamelCase, lowerCamelCase): if matrix_dimensions(lowerCamelCase) == (2, 2): return default_matrix_multiplication(lowerCamelCase, lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = split_matrix(lowerCamelCase) __lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase) __lowerCAmelCase = actual_strassen(lowerCamelCase, matrix_subtraction(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_addition(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = actual_strassen(matrix_subtraction(lowerCamelCase, lowerCamelCase), matrix_addition(lowerCamelCase, lowerCamelCase)) __lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase) __lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = matrix_addition(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(lowerCamelCase, lowerCamelCase), lowerCamelCase), lowerCamelCase) # construct the new matrix from our 4 quadrants __lowerCAmelCase = [] for i in range(len(lowerCamelCase)): new_matrix.append(top_left[i] + top_right[i]) for i in range(len(lowerCamelCase)): new_matrix.append(bot_left[i] + bot_right[i]) return new_matrix def __magic_name__( lowerCamelCase, lowerCamelCase): if matrix_dimensions(lowerCamelCase)[1] != matrix_dimensions(lowerCamelCase)[0]: __lowerCAmelCase = ( '''Unable to multiply these matrices, please check the dimensions.\n''' F"""Matrix A: {matrixa}\n""" F"""Matrix B: {matrixa}""" ) raise Exception(lowerCamelCase) __lowerCAmelCase = matrix_dimensions(lowerCamelCase) __lowerCAmelCase = matrix_dimensions(lowerCamelCase) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __lowerCAmelCase = max(*lowerCamelCase, *lowerCamelCase) __lowerCAmelCase = int(math.pow(2, math.ceil(math.loga(lowerCamelCase)))) __lowerCAmelCase = matrixa __lowerCAmelCase = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0, lowerCamelCase): if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): new_matrixa[i].append(0) else: new_matrixa.append([0] * maxim) if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): new_matrixa[i].append(0) else: new_matrixa.append([0] * maxim) __lowerCAmelCase = actual_strassen(lowerCamelCase, lowerCamelCase) # Removing the additional zeros for i in range(0, lowerCamelCase): if i < dimensiona[0]: for _ in range(dimensiona[1], lowerCamelCase): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": _UpperCAmelCase : List[str] = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] _UpperCAmelCase : Optional[Any] = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]] print(strassen(matrixa, matrixa))
9
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCAmelCase : List[str] = { """configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : int = ["""AlbertTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : str = ["""AlbertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : str = [ """ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """AlbertForMaskedLM""", """AlbertForMultipleChoice""", """AlbertForPreTraining""", """AlbertForQuestionAnswering""", """AlbertForSequenceClassification""", """AlbertForTokenClassification""", """AlbertModel""", """AlbertPreTrainedModel""", """load_tf_weights_in_albert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Tuple = [ """TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFAlbertForMaskedLM""", """TFAlbertForMultipleChoice""", """TFAlbertForPreTraining""", """TFAlbertForQuestionAnswering""", """TFAlbertForSequenceClassification""", """TFAlbertForTokenClassification""", """TFAlbertMainLayer""", """TFAlbertModel""", """TFAlbertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Optional[Any] = [ """FlaxAlbertForMaskedLM""", """FlaxAlbertForMultipleChoice""", """FlaxAlbertForPreTraining""", """FlaxAlbertForQuestionAnswering""", """FlaxAlbertForSequenceClassification""", """FlaxAlbertForTokenClassification""", """FlaxAlbertModel""", """FlaxAlbertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys _UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
9
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowercase ) self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase , padding_value=1.0 ) __lowerCAmelCase = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = tokenizer(__lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = CLIPProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
9
1
'''simple docstring''' import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) _UpperCAmelCase : int = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } _UpperCAmelCase : Dict = { """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } _UpperCAmelCase : Optional[Any] = {"""facebook/blenderbot-3B""": 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __magic_name__( ): __lowerCAmelCase = ( list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1)) ) __lowerCAmelCase = bs[:] __lowerCAmelCase = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase) cs.append(2**8 + n) n += 1 __lowerCAmelCase = [chr(lowerCamelCase) for n in cs] return dict(zip(lowerCamelCase, lowerCamelCase)) def __magic_name__( lowerCamelCase): __lowerCAmelCase = set() __lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char)) __lowerCAmelCase = char return pairs class a__ ( __A ): """simple docstring""" __UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES __UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : str = ['input_ids', 'attention_mask'] def __init__(self , __lowercase , __lowercase , __lowercase="replace" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase=False , **__lowercase , ): __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else bos_token __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else eos_token __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else sep_token __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else cls_token __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else unk_token __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token super().__init__( errors=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , add_prefix_space=__lowercase , **__lowercase , ) with open(__lowercase , encoding='''utf-8''' ) as vocab_handle: __lowerCAmelCase = json.load(__lowercase ) __lowerCAmelCase = {v: k for k, v in self.encoder.items()} __lowerCAmelCase = errors # how to handle errors in decoding __lowerCAmelCase = bytes_to_unicode() __lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(__lowercase , encoding='''utf-8''' ) as merges_handle: __lowerCAmelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = {} __lowerCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowerCAmelCase = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _snake_case (self ): return len(self.encoder ) def _snake_case (self ): return dict(self.encoder , **self.added_tokens_encoder ) def _snake_case (self , __lowercase ): if token in self.cache: return self.cache[token] __lowerCAmelCase = tuple(__lowercase ) __lowerCAmelCase = get_pairs(__lowercase ) if not pairs: return token while True: __lowerCAmelCase = min(__lowercase , key=lambda __lowercase : self.bpe_ranks.get(__lowercase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCAmelCase , __lowerCAmelCase = bigram __lowerCAmelCase = [] __lowerCAmelCase = 0 while i < len(__lowercase ): try: __lowerCAmelCase = word.index(__lowercase , __lowercase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowerCAmelCase = j if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCAmelCase = tuple(__lowercase ) __lowerCAmelCase = new_word if len(__lowercase ) == 1: break else: __lowerCAmelCase = get_pairs(__lowercase ) __lowerCAmelCase = ''' '''.join(__lowercase ) __lowerCAmelCase = word return word def _snake_case (self , __lowercase ): __lowerCAmelCase = [] for token in re.findall(self.pat , __lowercase ): __lowerCAmelCase = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowercase ).split(''' ''' ) ) return bpe_tokens def _snake_case (self , __lowercase ): return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) ) def _snake_case (self , __lowercase ): return self.decoder.get(__lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = ''''''.join(__lowercase ) __lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def _snake_case (self , __lowercase , __lowercase = None ): if not os.path.isdir(__lowercase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase = os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__lowercase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowercase , ensure_ascii=__lowercase ) + '''\n''' ) __lowerCAmelCase = 0 with open(__lowercase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowercase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) __lowerCAmelCase = token_index writer.write(''' '''.join(__lowercase ) + '''\n''' ) index += 1 return vocab_file, merge_file def _snake_case (self , __lowercase , __lowercase = None , __lowercase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase ) if token_ids_a is None: return [1] + ([0] * len(__lowercase )) + [1] return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1] def _snake_case (self , __lowercase , __lowercase = None ): __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case (self , __lowercase , __lowercase=False , **__lowercase ): __lowerCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__lowercase ) > 0 and not text[0].isspace()): __lowerCAmelCase = ''' ''' + text return (text, kwargs) def _snake_case (self , __lowercase , __lowercase = None ): return token_ids_a + [self.eos_token_id] def _snake_case (self , __lowercase ): __lowerCAmelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(__lowercase ) __lowerCAmelCase = ''' '''.join(__lowercase ) __lowerCAmelCase = self.encode(__lowercase ) if len(__lowercase ) > self.model_max_length: __lowerCAmelCase = input_ids[-self.model_max_length :] logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
9
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class a__ ( __A ): """simple docstring""" def __init__(self , __lowercase , __lowercase=None , __lowercase=None , __lowercase=0 ): __lowerCAmelCase = 1.0 if scale is None else scale __lowerCAmelCase = 0.0 if loc is None else loc super().__init__(__lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__lowercase )] ) @property def _snake_case (self ): return self.base_dist.mean * self.scale + self.loc @property def _snake_case (self ): return self.base_dist.variance * self.scale**2 @property def _snake_case (self ): return self.variance.sqrt() class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase , __lowercase , __lowercase , **__lowercase ): super().__init__(**__lowercase ) __lowerCAmelCase = args_dim __lowerCAmelCase = nn.ModuleList([nn.Linear(__lowercase , __lowercase ) for dim in args_dim.values()] ) __lowerCAmelCase = domain_map def _snake_case (self , __lowercase ): __lowerCAmelCase = [proj(__lowercase ) for proj in self.proj] return self.domain_map(*__lowercase ) class a__ ( nn.Module ): """simple docstring""" def __init__(self , __lowercase ): super().__init__() __lowerCAmelCase = function def _snake_case (self , __lowercase , *__lowercase ): return self.function(__lowercase , *__lowercase ) class a__ : """simple docstring""" __UpperCamelCase : type __UpperCamelCase : int __UpperCamelCase : Dict[str, int] def __init__(self , __lowercase = 1 ): __lowerCAmelCase = dim __lowerCAmelCase = {k: dim * self.args_dim[k] for k in self.args_dim} def _snake_case (self , __lowercase ): if self.dim == 1: return self.distribution_class(*__lowercase ) else: return Independent(self.distribution_class(*__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , ): __lowerCAmelCase = self._base_distribution(__lowercase ) if loc is None and scale is None: return distr else: return AffineTransformed(__lowercase , loc=__lowercase , scale=__lowercase , event_dim=self.event_dim ) @property def _snake_case (self ): return () if self.dim == 1 else (self.dim,) @property def _snake_case (self ): return len(self.event_shape ) @property def _snake_case (self ): return 0.0 def _snake_case (self , __lowercase ): return ParameterProjection( in_features=__lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _snake_case (self , *__lowercase ): raise NotImplementedError() @staticmethod def _snake_case (__lowercase ): return (x + torch.sqrt(torch.square(__lowercase ) + 4.0 )) / 2.0 class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} __UpperCamelCase : type = StudentT @classmethod def _snake_case (cls , __lowercase , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) __lowerCAmelCase = 2.0 + cls.squareplus(__lowercase ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"loc": 1, "scale": 1} __UpperCamelCase : type = Normal @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict[str, int] = {"total_count": 1, "logits": 1} __UpperCamelCase : type = NegativeBinomial @classmethod def _snake_case (cls , __lowercase , __lowercase ): __lowerCAmelCase = cls.squareplus(__lowercase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _snake_case (self , __lowercase ): __lowerCAmelCase , __lowerCAmelCase = distr_args if self.dim == 1: return self.distribution_class(total_count=__lowercase , logits=__lowercase ) else: return Independent(self.distribution_class(total_count=__lowercase , logits=__lowercase ) , 1 ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None ): __lowerCAmelCase , __lowerCAmelCase = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
9
1
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : List[str] = LongformerTokenizer __UpperCamelCase : Dict = True __UpperCamelCase : List[str] = LongformerTokenizerFast __UpperCamelCase : Optional[Any] = True def _snake_case (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) def _snake_case (self , **__lowercase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , **__lowercase ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = '''lower newer''' return input_text, output_text def _snake_case (self ): __lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCAmelCase = tokenizer.tokenize(__lowercase ) # , add_prefix_space=True) self.assertListEqual(__lowercase , __lowercase ) __lowerCAmelCase = tokens + [tokenizer.unk_token] __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__lowercase ) , [0, 3_14_14, 2_32, 3_28, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__lowercase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def _snake_case (self ): __lowerCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' ) __lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = '''Encode this sequence.''' __lowerCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments __lowerCAmelCase = tokenizer.encode(__lowercase , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(__lowercase , __lowercase ) __lowerCAmelCase = tokenizer.encode(__lowercase , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(__lowercase , __lowercase ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) __lowerCAmelCase = tokenizer.encode(__lowercase , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(__lowercase , __lowercase ) # Testing spaces after special tokens __lowerCAmelCase = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase )} ) # mask token has a left space __lowerCAmelCase = tokenizer.convert_tokens_to_ids(__lowercase ) __lowerCAmelCase = '''Encode <mask> sequence''' __lowerCAmelCase = '''Encode <mask>sequence''' __lowerCAmelCase = tokenizer.encode(__lowercase ) __lowerCAmelCase = encoded.index(__lowercase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(__lowercase , __lowercase ) __lowerCAmelCase = tokenizer.encode(__lowercase ) __lowerCAmelCase = encoded.index(__lowercase ) __lowerCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(__lowercase , __lowercase ) def _snake_case (self ): pass def _snake_case (self ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowercase , **__lowercase ) __lowerCAmelCase = self.tokenizer_class.from_pretrained(__lowercase , **__lowercase ) __lowerCAmelCase = '''A, <mask> AllenNLP sentence.''' __lowerCAmelCase = tokenizer_r.encode_plus(__lowercase , add_special_tokens=__lowercase , return_token_type_ids=__lowercase ) __lowerCAmelCase = tokenizer_p.encode_plus(__lowercase , add_special_tokens=__lowercase , return_token_type_ids=__lowercase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) __lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( __lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( __lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def _snake_case (self ): for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase ) __lowerCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __lowerCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __lowercase ) self.assertEqual(post_processor_state['''add_prefix_space'''] , __lowercase ) self.assertEqual(post_processor_state['''trim_offsets'''] , __lowercase ) def _snake_case (self ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): __lowerCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` __lowerCAmelCase = F"""{text_of_1_token} {text_of_1_token}""" __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase ) __lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowercase ) + 1, len(__lowercase ) + 1 + len(__lowercase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase ) __lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowercase ) + 1, len(__lowercase ) + 1 + len(__lowercase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase ) __lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowercase ), len(__lowercase ) + 1 + len(__lowercase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase ) __lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__lowercase ), len(__lowercase ) + 1 + len(__lowercase )) , ) __lowerCAmelCase = F""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase ) __lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowercase ) + 1, 1 + len(__lowercase ) + 1 + len(__lowercase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase ) __lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowercase ), 1 + len(__lowercase ) + 1 + len(__lowercase )) , ) __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained( __lowercase , use_fast=__lowercase , add_prefix_space=__lowercase , trim_offsets=__lowercase ) __lowerCAmelCase = tokenizer_r(__lowercase , return_offsets_mapping=__lowercase , add_special_tokens=__lowercase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowercase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__lowercase ), 1 + len(__lowercase ) + 1 + len(__lowercase )) , )
9
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class a__ ( __A ): """simple docstring""" __UpperCamelCase : Tuple = 'naver-clova-ix/donut-base-finetuned-docvqa' __UpperCamelCase : List[str] = ( 'This is a tool that answers a question about an document (pdf). It takes an input named `document` which ' 'should be the document containing the information, as well as a `question` that is the question about the ' 'document. It returns a text that contains the answer to the question.' ) __UpperCamelCase : Optional[int] = 'document_qa' __UpperCamelCase : Optional[int] = AutoProcessor __UpperCamelCase : Tuple = VisionEncoderDecoderModel __UpperCamelCase : Any = ['image', 'text'] __UpperCamelCase : Optional[Any] = ['text'] def __init__(self , *__lowercase , **__lowercase ): if not is_vision_available(): raise ValueError('''Pillow must be installed to use the DocumentQuestionAnsweringTool.''' ) super().__init__(*__lowercase , **__lowercase ) def _snake_case (self , __lowercase , __lowercase ): __lowerCAmelCase = '''<s_docvqa><s_question>{user_input}</s_question><s_answer>''' __lowerCAmelCase = task_prompt.replace('''{user_input}''' , __lowercase ) __lowerCAmelCase = self.pre_processor.tokenizer( __lowercase , add_special_tokens=__lowercase , return_tensors='''pt''' ).input_ids __lowerCAmelCase = self.pre_processor(__lowercase , return_tensors='''pt''' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def _snake_case (self , __lowercase ): return self.model.generate( inputs['''pixel_values'''].to(self.device ) , decoder_input_ids=inputs['''decoder_input_ids'''].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__lowercase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__lowercase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__lowercase , ).sequences def _snake_case (self , __lowercase ): __lowerCAmelCase = self.pre_processor.batch_decode(__lowercase )[0] __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '''''' ) __lowerCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '''''' ) __lowerCAmelCase = re.sub(R'''<.*?>''' , '''''' , __lowercase , count=1 ).strip() # remove first task start token __lowerCAmelCase = self.pre_processor.tokenajson(__lowercase ) return sequence["answer"]
9
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _UpperCAmelCase : Any = { """configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : List[Any] = [ """GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """GraphormerForGraphClassification""", """GraphormerModel""", """GraphormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys _UpperCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
9
'''simple docstring''' def __magic_name__( lowerCamelCase): __lowerCAmelCase = 1 __lowerCAmelCase = 2 while i * i <= n: __lowerCAmelCase = 0 while n % i == 0: n //= i multiplicity += 1 n_divisors *= multiplicity + 1 i += 1 if n > 1: n_divisors *= 2 return n_divisors def __magic_name__( ): __lowerCAmelCase = 1 __lowerCAmelCase = 1 while True: i += 1 t_num += i if count_divisors(lowerCamelCase) > 5_0_0: break return t_num if __name__ == "__main__": print(solution())
9
1
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _UpperCAmelCase : Tuple = 1_6 _UpperCAmelCase : List[Any] = 3_2 def __magic_name__( lowerCamelCase, lowerCamelCase = 1_6): __lowerCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''') __lowerCAmelCase = load_dataset('''glue''', '''mrpc''') def tokenize_function(lowerCamelCase): # max_length=None => use the model max length (it's actually the default) __lowerCAmelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=lowerCamelCase, max_length=lowerCamelCase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __lowerCAmelCase = datasets.map( lowerCamelCase, batched=lowerCamelCase, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __lowerCAmelCase = tokenized_datasets.rename_column('''label''', '''labels''') def collate_fn(lowerCamelCase): # On TPU it's best to pad everything to the same length or training will be very slow. __lowerCAmelCase = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __lowerCAmelCase = 1_6 elif accelerator.mixed_precision != "no": __lowerCAmelCase = 8 else: __lowerCAmelCase = None return tokenizer.pad( lowerCamelCase, padding='''longest''', max_length=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_tensors='''pt''', ) # Instantiate dataloaders. __lowerCAmelCase = DataLoader( tokenized_datasets['''train'''], shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=lowerCamelCase) __lowerCAmelCase = DataLoader( tokenized_datasets['''validation'''], shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=lowerCamelCase) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _UpperCAmelCase : Dict = mocked_dataloaders # noqa: F811 def __magic_name__( lowerCamelCase, lowerCamelCase): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', lowerCamelCase) == "1": __lowerCAmelCase = 2 # New Code # __lowerCAmelCase = int(args.gradient_accumulation_steps) # Initialize accelerator __lowerCAmelCase = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=lowerCamelCase) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( '''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''') # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowerCAmelCase = config['''lr'''] __lowerCAmelCase = int(config['''num_epochs''']) __lowerCAmelCase = int(config['''seed''']) __lowerCAmelCase = int(config['''batch_size''']) __lowerCAmelCase = evaluate.load('''glue''', '''mrpc''') set_seed(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = get_dataloaders(lowerCamelCase, lowerCamelCase) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=lowerCamelCase) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __lowerCAmelCase = model.to(accelerator.device) # Instantiate optimizer __lowerCAmelCase = AdamW(params=model.parameters(), lr=lowerCamelCase) # Instantiate scheduler __lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=lowerCamelCase, num_warmup_steps=1_0_0, num_training_steps=(len(lowerCamelCase) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = accelerator.prepare( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) # Now we train the model for epoch in range(lowerCamelCase): model.train() for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(lowerCamelCase): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = output.loss accelerator.backward(lowerCamelCase) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): __lowerCAmelCase = model(**lowerCamelCase) __lowerCAmelCase = outputs.logits.argmax(dim=-1) __lowerCAmelCase , __lowerCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels'''])) metric.add_batch( predictions=lowerCamelCase, references=lowerCamelCase, ) __lowerCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""", lowerCamelCase) def __magic_name__( ): __lowerCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''') parser.add_argument( '''--mixed_precision''', type=lowerCamelCase, default=lowerCamelCase, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''', ) # New Code # parser.add_argument( '''--gradient_accumulation_steps''', type=lowerCamelCase, default=1, help='''The number of minibatches to be ran before gradients are accumulated.''', ) parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''') __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(lowerCamelCase, lowerCamelCase) if __name__ == "__main__": main()
9
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. __lowerCAmelCase = [[1, 2, 4], [1, 2, 3, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) self.assertTrue(isinstance(dc.token_ids , __lowercase ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__lowercase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _snake_case (self ): # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). __lowerCAmelCase = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__lowercase ): DisjunctiveConstraint(__lowercase ) # fails here def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) __lowerCAmelCase = stepped is True and completed is False and reset is False self.assertTrue(__lowercase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(3 ) __lowerCAmelCase = stepped is True and completed is True and reset is False self.assertTrue(__lowercase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _snake_case (self ): __lowerCAmelCase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __lowerCAmelCase = DisjunctiveConstraint(__lowercase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
9
1
'''simple docstring''' from queue import PriorityQueue from typing import Any import numpy as np def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ): for nxt, d in graph[v]: if nxt in visited_forward: continue __lowerCAmelCase = cst_fwd.get(lowerCamelCase, np.inf) __lowerCAmelCase = cst_fwd[v] + d if new_cost_f < old_cost_f: queue.put((new_cost_f, nxt)) __lowerCAmelCase = new_cost_f __lowerCAmelCase = v if nxt in visited_backward: if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: __lowerCAmelCase = cst_fwd[v] + d + cst_bwd[nxt] return shortest_distance def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = -1 __lowerCAmelCase = set() __lowerCAmelCase = set() __lowerCAmelCase = {source: 0} __lowerCAmelCase = {destination: 0} __lowerCAmelCase = {source: None} __lowerCAmelCase = {destination: None} __lowerCAmelCase = PriorityQueue() __lowerCAmelCase = PriorityQueue() __lowerCAmelCase = np.inf queue_forward.put((0, source)) queue_backward.put((0, destination)) if source == destination: return 0 while not queue_forward.empty() and not queue_backward.empty(): __lowerCAmelCase , __lowerCAmelCase = queue_forward.get() visited_forward.add(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = queue_backward.get() visited_backward.add(lowerCamelCase) __lowerCAmelCase = pass_and_relaxation( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) __lowerCAmelCase = pass_and_relaxation( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break if shortest_distance != np.inf: __lowerCAmelCase = shortest_distance return shortest_path_distance _UpperCAmelCase : List[str] = { """B""": [["""C""", 1]], """C""": [["""D""", 1]], """D""": [["""F""", 1]], """E""": [["""B""", 1], ["""G""", 2]], """F""": [], """G""": [["""F""", 1]], } _UpperCAmelCase : Optional[Any] = { """B""": [["""E""", 1]], """C""": [["""B""", 1]], """D""": [["""C""", 1]], """F""": [["""D""", 1], ["""G""", 1]], """E""": [[None, np.inf]], """G""": [["""E""", 2]], } if __name__ == "__main__": import doctest doctest.testmod()
9
'''simple docstring''' from typing import Dict, Optional import numpy as np import datasets _UpperCAmelCase : List[str] = """ IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation, the mean IoU of the image is calculated by taking the IoU of each class and averaging them. """ _UpperCAmelCase : str = """ Args: predictions (`List[ndarray]`): List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. references (`List[ndarray]`): List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size. num_labels (`int`): Number of classes (categories). ignore_index (`int`): Index that will be ignored during evaluation. nan_to_num (`int`, *optional*): If specified, NaN values will be replaced by the number defined by the user. label_map (`dict`, *optional*): If specified, dictionary mapping old label indices to new label indices. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255. Returns: `Dict[str, float | ndarray]` comprising various elements: - *mean_iou* (`float`): Mean Intersection-over-Union (IoU averaged over all categories). - *mean_accuracy* (`float`): Mean accuracy (averaged over all categories). - *overall_accuracy* (`float`): Overall accuracy on all images. - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`): Per category accuracy. - *per_category_iou* (`ndarray` of shape `(num_labels,)`): Per category IoU. Examples: >>> import numpy as np >>> mean_iou = datasets.load_metric(\"mean_iou\") >>> # suppose one has 3 different segmentation maps predicted >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]]) >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]]) >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]]) >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]]) >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]]) >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]]) >>> predicted = [predicted_1, predicted_2, predicted_3] >>> ground_truth = [actual_1, actual_2, actual_3] >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False) >>> print(results) # doctest: +NORMALIZE_WHITESPACE {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])} """ _UpperCAmelCase : Tuple = """\ @software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020, author = {{MMSegmentation Contributors}}, license = {Apache-2.0}, month = {7}, title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}}, url = {https://github.com/open-mmlab/mmsegmentation}, year = {2020} }""" def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): if label_map is not None: for old_id, new_id in label_map.items(): __lowerCAmelCase = new_id # turn into Numpy arrays __lowerCAmelCase = np.array(lowerCamelCase) __lowerCAmelCase = np.array(lowerCamelCase) if reduce_labels: __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label - 1 __lowerCAmelCase = 2_5_5 __lowerCAmelCase = label != ignore_index __lowerCAmelCase = np.not_equal(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = pred_label[mask] __lowerCAmelCase = np.array(lowerCamelCase)[mask] __lowerCAmelCase = pred_label[pred_label == label] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = np.histogram(lowerCamelCase, bins=lowerCamelCase, range=(0, num_labels - 1))[0] __lowerCAmelCase = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) __lowerCAmelCase = np.zeros((num_labels,), dtype=np.floataa) for result, gt_seg_map in zip(lowerCamelCase, lowerCamelCase): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = total_intersect_and_union( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) # compute metrics __lowerCAmelCase = {} __lowerCAmelCase = total_area_intersect.sum() / total_area_label.sum() __lowerCAmelCase = total_area_intersect / total_area_union __lowerCAmelCase = total_area_intersect / total_area_label __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = np.nanmean(lowerCamelCase) __lowerCAmelCase = all_acc __lowerCAmelCase = iou __lowerCAmelCase = acc if nan_to_num is not None: __lowerCAmelCase = {metric: np.nan_to_num(lowerCamelCase, nan=lowerCamelCase) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): """simple docstring""" def _snake_case (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ), } ) , reference_urls=[ '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py''' ] , ) def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = False , ): __lowerCAmelCase = mean_iou( results=__lowercase , gt_seg_maps=__lowercase , num_labels=__lowercase , ignore_index=__lowercase , nan_to_num=__lowercase , label_map=__lowercase , reduce_labels=__lowercase , ) return iou_result
9
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : Dict = logging.get_logger(__name__) def __magic_name__( lowerCamelCase): __lowerCAmelCase = SwinConfig( embed_dim=1_9_2, depths=(2, 2, 1_8, 2), num_heads=(6, 1_2, 2_4, 4_8), window_size=1_2, out_features=['''stage2''', '''stage3''', '''stage4'''], ) __lowerCAmelCase = DetaConfig( backbone_config=lowerCamelCase, num_queries=9_0_0, encoder_ffn_dim=2_0_4_8, decoder_ffn_dim=2_0_4_8, num_feature_levels=5, assign_first_stage=lowerCamelCase, with_box_refine=lowerCamelCase, two_stage=lowerCamelCase, ) # set labels __lowerCAmelCase = '''huggingface/label-files''' if "o365" in model_name: __lowerCAmelCase = 3_6_6 __lowerCAmelCase = '''object365-id2label.json''' else: __lowerCAmelCase = 9_1 __lowerCAmelCase = '''coco-detection-id2label.json''' __lowerCAmelCase = num_labels __lowerCAmelCase = json.load(open(cached_download(hf_hub_url(lowerCamelCase, lowerCamelCase, repo_type='''dataset''')), '''r''')) __lowerCAmelCase = {int(lowerCamelCase): v for k, v in idalabel.items()} __lowerCAmelCase = idalabel __lowerCAmelCase = {v: k for k, v in idalabel.items()} return config def __magic_name__( lowerCamelCase): __lowerCAmelCase = [] # stem # fmt: off rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''')) rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''')) rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''')) rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''')) # stages for i in range(len(config.backbone_config.depths)): for j in range(config.backbone_config.depths[i]): rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""")) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""")) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""")) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""")) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""")) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""")) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""")) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""")) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""")) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""")) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""")) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""")) if i < 3: rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""")) rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""")) rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""")) rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''')) rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''')) rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''')) rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''')) rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''')) rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''')) # transformer encoder for i in range(config.encoder_layers): rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""")) # transformer decoder for i in range(config.decoder_layers): rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""")) # fmt: on return rename_keys def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = dct.pop(lowerCamelCase) __lowerCAmelCase = val def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))] for i in range(len(backbone_config.depths)): __lowerCAmelCase = num_features[i] for j in range(backbone_config.depths[i]): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowerCAmelCase = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""") __lowerCAmelCase = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""") # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[:dim, :] __lowerCAmelCase = in_proj_bias[: dim] __lowerCAmelCase = in_proj_weight[ dim : dim * 2, : ] __lowerCAmelCase = in_proj_bias[ dim : dim * 2 ] __lowerCAmelCase = in_proj_weight[ -dim :, : ] __lowerCAmelCase = in_proj_bias[-dim :] # fmt: on def __magic_name__( lowerCamelCase, lowerCamelCase): # transformer decoder self-attention layers __lowerCAmelCase = config.d_model for i in range(config.decoder_layers): # read in weights + bias of input projection layer of self-attention __lowerCAmelCase = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""") __lowerCAmelCase = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""") # next, add query, keys and values (in that order) to the state dict __lowerCAmelCase = in_proj_weight[:hidden_size, :] __lowerCAmelCase = in_proj_bias[:hidden_size] __lowerCAmelCase = in_proj_weight[ hidden_size : hidden_size * 2, : ] __lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] __lowerCAmelCase = in_proj_weight[-hidden_size:, :] __lowerCAmelCase = in_proj_bias[-hidden_size:] def __magic_name__( ): __lowerCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowerCAmelCase = Image.open(requests.get(lowerCamelCase, stream=lowerCamelCase).raw) return im @torch.no_grad() def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = get_deta_config(lowerCamelCase) # load original state dict if model_name == "deta-swin-large": __lowerCAmelCase = hf_hub_download(repo_id='''nielsr/deta-checkpoints''', filename='''adet_swin_ft.pth''') elif model_name == "deta-swin-large-o365": __lowerCAmelCase = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''', filename='''deta_swin_pt_o365.pth''') else: raise ValueError(F"""Model name {model_name} not supported""") __lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')['''model'''] # original state dict for name, param in state_dict.items(): print(lowerCamelCase, param.shape) # rename keys __lowerCAmelCase = create_rename_keys(lowerCamelCase) for src, dest in rename_keys: rename_key(lowerCamelCase, lowerCamelCase, lowerCamelCase) read_in_swin_q_k_v(lowerCamelCase, config.backbone_config) read_in_decoder_q_k_v(lowerCamelCase, lowerCamelCase) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: __lowerCAmelCase = state_dict.pop(lowerCamelCase) __lowerCAmelCase = val if "input_proj" in key: __lowerCAmelCase = state_dict.pop(lowerCamelCase) __lowerCAmelCase = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: __lowerCAmelCase = state_dict.pop(lowerCamelCase) __lowerCAmelCase = val # finally, create HuggingFace model and load state dict __lowerCAmelCase = DetaForObjectDetection(lowerCamelCase) model.load_state_dict(lowerCamelCase) model.eval() __lowerCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' model.to(lowerCamelCase) # load image processor __lowerCAmelCase = DetaImageProcessor(format='''coco_detection''') # verify our conversion on image __lowerCAmelCase = prepare_img() __lowerCAmelCase = processor(images=lowerCamelCase, return_tensors='''pt''') __lowerCAmelCase = encoding['''pixel_values'''] __lowerCAmelCase = model(pixel_values.to(lowerCamelCase)) # verify logits print('''Logits:''', outputs.logits[0, :3, :3]) print('''Boxes:''', outputs.pred_boxes[0, :3, :3]) if model_name == "deta-swin-large": __lowerCAmelCase = torch.tensor( [[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]]) __lowerCAmelCase = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]]) elif model_name == "deta-swin-large-o365": __lowerCAmelCase = torch.tensor( [[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]]) __lowerCAmelCase = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]]) assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(lowerCamelCase), atol=1E-4) assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(lowerCamelCase), atol=1E-4) print('''Everything ok!''') if pytorch_dump_folder_path: # Save model and processor logger.info(F"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""") Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase) model.save_pretrained(lowerCamelCase) processor.save_pretrained(lowerCamelCase) # Push to hub if push_to_hub: print('''Pushing model and processor to hub...''') model.push_to_hub(F"""jozhang97/{model_name}""") processor.push_to_hub(F"""jozhang97/{model_name}""") if __name__ == "__main__": _UpperCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument( """--model_name""", type=str, default="""deta-swin-large""", choices=["""deta-swin-large""", """deta-swin-large-o365"""], help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _UpperCAmelCase : Tuple = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
9
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : str = DebertaTokenizer __UpperCamelCase : str = True __UpperCamelCase : Any = DebertaTokenizerFast def _snake_case (self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowerCAmelCase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowerCAmelCase = {'''unk_token''': '''[UNK]'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) def _snake_case (self , **__lowercase ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self , __lowercase ): __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = '''lower newer''' return input_text, output_text def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowerCAmelCase = tokenizer.tokenize(__lowercase ) self.assertListEqual(__lowercase , __lowercase ) __lowerCAmelCase = tokens + [tokenizer.unk_token] __lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = tokenizer('''Hello''' , '''World''' ) __lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __lowercase ) @slow def _snake_case (self ): __lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.encode( '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase ) __lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def _snake_case (self ): __lowerCAmelCase = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: __lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] __lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase ) __lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']] # fmt: off __lowerCAmelCase = { '''input_ids''': [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on __lowerCAmelCase = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __lowercase ) for expected, decoded in zip(__lowercase , __lowercase ): self.assertEqual(__lowercase , __lowercase )
9
1
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class a__ ( unittest.TestCase ): """simple docstring""" def __init__(self , __lowercase , __lowercase = True , __lowercase = None , __lowercase = 32 , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = True , __lowercase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowercase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowercase = True , __lowercase=7 , __lowercase=30 , __lowercase=4_00 , __lowercase=3 , ): __lowerCAmelCase = parent __lowerCAmelCase = do_resize __lowerCAmelCase = size if size is not None else {'''shortest_edge''': 2_88} __lowerCAmelCase = size_divisor __lowerCAmelCase = do_rescale __lowerCAmelCase = rescale_factor __lowerCAmelCase = do_normalize __lowerCAmelCase = do_center_crop __lowerCAmelCase = image_mean __lowerCAmelCase = image_std __lowerCAmelCase = do_pad __lowerCAmelCase = batch_size __lowerCAmelCase = num_channels __lowerCAmelCase = min_resolution __lowerCAmelCase = max_resolution def _snake_case (self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _snake_case (self , __lowercase , __lowercase=False ): if not batched: __lowerCAmelCase = self.size['''shortest_edge'''] __lowerCAmelCase = image_inputs[0] if isinstance(__lowercase , Image.Image ): __lowerCAmelCase , __lowerCAmelCase = image.size else: __lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2] __lowerCAmelCase = size / min(__lowercase , __lowercase ) if h < w: __lowerCAmelCase , __lowerCAmelCase = size, scale * w else: __lowerCAmelCase , __lowerCAmelCase = scale * h, size __lowerCAmelCase = int((13_33 / 8_00) * size ) if max(__lowercase , __lowercase ) > max_size: __lowerCAmelCase = max_size / max(__lowercase , __lowercase ) __lowerCAmelCase = newh * scale __lowerCAmelCase = neww * scale __lowerCAmelCase , __lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 ) __lowerCAmelCase , __lowerCAmelCase = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __lowerCAmelCase = [] for image in image_inputs: __lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[0] )[0] __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Any = BridgeTowerImageProcessor if is_vision_available() else None def _snake_case (self ): __lowerCAmelCase = BridgeTowerImageProcessingTester(self ) @property def _snake_case (self ): return self.image_processor_tester.prepare_image_processor_dict() def _snake_case (self ): __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowercase , '''image_std''' ) ) self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowercase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowercase , '''size''' ) ) self.assertTrue(hasattr(__lowercase , '''size_divisor''' ) ) def _snake_case (self ): pass def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , np.ndarray ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , torch.Tensor ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
9
'''simple docstring''' import argparse import datetime def __magic_name__( lowerCamelCase): __lowerCAmelCase = { '''0''': '''Sunday''', '''1''': '''Monday''', '''2''': '''Tuesday''', '''3''': '''Wednesday''', '''4''': '''Thursday''', '''5''': '''Friday''', '''6''': '''Saturday''', } __lowerCAmelCase = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(lowerCamelCase) < 1_1: raise ValueError('''Must be 10 characters long''') # Get month __lowerCAmelCase = int(date_input[0] + date_input[1]) # Validate if not 0 < m < 1_3: raise ValueError('''Month must be between 1 - 12''') __lowerCAmelCase = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''') # Get day __lowerCAmelCase = int(date_input[3] + date_input[4]) # Validate if not 0 < d < 3_2: raise ValueError('''Date must be between 1 - 31''') # Get second separator __lowerCAmelCase = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('''Date separator must be \'-\' or \'/\'''') # Get year __lowerCAmelCase = int(date_input[6] + date_input[7] + date_input[8] + date_input[9]) # Arbitrary year range if not 4_5 < y < 8_5_0_0: raise ValueError( '''Year out of range. There has to be some sort of limit...right?''') # Get datetime obj for validation __lowerCAmelCase = datetime.date(int(lowerCamelCase), int(lowerCamelCase), int(lowerCamelCase)) # Start math if m <= 2: __lowerCAmelCase = y - 1 __lowerCAmelCase = m + 1_2 # maths var __lowerCAmelCase = int(str(lowerCamelCase)[:2]) __lowerCAmelCase = int(str(lowerCamelCase)[2:]) __lowerCAmelCase = int(2.6 * m - 5.39) __lowerCAmelCase = int(c / 4) __lowerCAmelCase = int(k / 4) __lowerCAmelCase = int(d + k) __lowerCAmelCase = int(t + u + v + x) __lowerCAmelCase = int(z - (2 * c)) __lowerCAmelCase = round(w % 7) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('''The date was evaluated incorrectly. Contact developer.''') # Response __lowerCAmelCase = F"""Your date {date_input}, is a {days[str(lowerCamelCase)]}!""" return response if __name__ == "__main__": import doctest doctest.testmod() _UpperCAmelCase : List[str] = argparse.ArgumentParser( description=( """Find out what day of the week nearly any date is or was. Enter """ """date as a string in the mm-dd-yyyy or mm/dd/yyyy format""" ) ) parser.add_argument( """date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)""" ) _UpperCAmelCase : Dict = parser.parse_args() zeller(args.date_input)
9
1
'''simple docstring''' # Imports import numpy as np class a__ : """simple docstring""" def __init__(self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) def _snake_case (self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): if red is not None: __lowerCAmelCase = red if green is not None: __lowerCAmelCase = green if blue is not None: __lowerCAmelCase = blue if red_edge is not None: __lowerCAmelCase = red_edge if nir is not None: __lowerCAmelCase = nir return True def _snake_case (self , __lowercase="" , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None ): self.set_matricies(red=__lowercase , green=__lowercase , blue=__lowercase , red_edge=__lowercase , nir=__lowercase ) __lowerCAmelCase = { '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: return funcs[index]() except KeyError: print('''Index not in the list!''' ) return False def _snake_case (self ): return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def _snake_case (self ): return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def _snake_case (self ): return self.nir * (self.red / (self.green**2)) def _snake_case (self ): return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def _snake_case (self ): return (self.nir - self.red) / (self.nir + self.red) def _snake_case (self ): return (self.nir - self.blue) / (self.nir + self.blue) def _snake_case (self ): return (self.redEdge - self.red) / (self.redEdge + self.red) def _snake_case (self ): return (self.nir - self.green) / (self.nir + self.green) def _snake_case (self ): return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def _snake_case (self ): return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def _snake_case (self ): return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def _snake_case (self ): return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def _snake_case (self , __lowercase=0.0_8 , __lowercase=1.2_2 , __lowercase=0.0_3 ): return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def _snake_case (self ): return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def _snake_case (self ): return (self.nir / self.green) - 1 def _snake_case (self ): return (self.nir / self.redEdge) - 1 def _snake_case (self ): return (self.red - self.blue) / self.red def _snake_case (self ): __lowerCAmelCase = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def _snake_case (self ): return self.nir - self.green def _snake_case (self ): return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def _snake_case (self ): __lowerCAmelCase = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def _snake_case (self , __lowercase=0.1_6 ): return (self.nir - self.green) / (self.nir + self.green + y) def _snake_case (self , __lowercase=0.5 ): return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def _snake_case (self ): return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def _snake_case (self , __lowercase=None , __lowercase=None ): return (self.nir - b) / (a * self.red) def _snake_case (self ): return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def _snake_case (self ): return (self.red + self.green + self.blue) / 3_0.5 def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.rvi() - 1) / (self.rvi() + 1) def _snake_case (self ): return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def _snake_case (self ): return self.green / (self.nir + self.red + self.green) def _snake_case (self ): return self.nir / (self.nir + self.red + self.green) def _snake_case (self ): return self.red / (self.nir + self.red + self.green) def _snake_case (self ): return (self.green - self.red) / (self.green + self.red) def _snake_case (self ): return (self.red - self.green) / (self.red + self.green) def _snake_case (self ): __lowerCAmelCase = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) __lowerCAmelCase = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def _snake_case (self ): return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def _snake_case (self ): return self.nir / self.red def _snake_case (self ): return (self.ndvi() + 0.5) ** (1 / 2) def _snake_case (self ): return (self.nir - self.redEdge) / (self.nir + self.redEdge)
9
'''simple docstring''' import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Optional[Any] = ConsistencyModelPipeline __UpperCamelCase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_PARAMS __UpperCamelCase : int = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt __UpperCamelCase : List[Any] = frozenset( [ 'num_inference_steps', 'generator', 'latents', 'output_type', 'return_dict', 'callback', 'callback_steps', ] ) @property def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet''' , ) return unet @property def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' , subfolder='''test_unet_class_cond''' , ) return unet def _snake_case (self , __lowercase=False ): if class_cond: __lowerCAmelCase = self.dummy_cond_unet else: __lowerCAmelCase = self.dummy_uncond_unet # Default to CM multistep sampler __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, } return components def _snake_case (self , __lowercase , __lowercase=0 ): if str(__lowercase ).startswith('''mps''' ): __lowerCAmelCase = torch.manual_seed(__lowercase ) else: __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowerCAmelCase = { '''batch_size''': 1, '''num_inference_steps''': None, '''timesteps''': [22, 0], '''generator''': generator, '''output_type''': '''np''', } return inputs def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase ) __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 0 __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _snake_case (self ): __lowerCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowerCAmelCase = self.get_dummy_components(class_cond=__lowercase ) __lowerCAmelCase = ConsistencyModelPipeline(**__lowercase ) __lowerCAmelCase = pipe.to(__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_dummy_inputs(__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = 0 __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 32, 32, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @slow @require_torch_gpu class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case (self , __lowercase=0 , __lowercase=False , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ): __lowerCAmelCase = torch.manual_seed(__lowercase ) __lowerCAmelCase = { '''num_inference_steps''': None, '''timesteps''': [22, 0], '''class_labels''': 0, '''generator''': generator, '''output_type''': '''np''', } if get_fixed_latents: __lowerCAmelCase = self.get_fixed_latents(seed=__lowercase , device=__lowercase , dtype=__lowercase , shape=__lowercase ) __lowerCAmelCase = latents return inputs def _snake_case (self , __lowercase=0 , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=(1, 3, 64, 64) ): if type(__lowercase ) == str: __lowerCAmelCase = torch.device(__lowercase ) __lowerCAmelCase = torch.Generator(device=__lowercase ).manual_seed(__lowercase ) __lowerCAmelCase = randn_tensor(__lowercase , generator=__lowercase , device=__lowercase , dtype=__lowercase ) return latents def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs() __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs() __lowerCAmelCase = 1 __lowerCAmelCase = None __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 @require_torch_a def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ): __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 @require_torch_a def _snake_case (self ): __lowerCAmelCase = UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' ) __lowerCAmelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __lowerCAmelCase = ConsistencyModelPipeline(unet=__lowercase , scheduler=__lowercase ) pipe.to(torch_device=__lowercase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__lowercase ) __lowerCAmelCase = self.get_inputs(get_fixed_latents=__lowercase , device=__lowercase ) __lowerCAmelCase = 1 __lowerCAmelCase = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__lowercase , enable_math=__lowercase , enable_mem_efficient=__lowercase ): __lowerCAmelCase = pipe(**__lowercase ).images assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
9
1
'''simple docstring''' import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''') __lowerCAmelCase = ( ('''layer.''', '''layer_'''), ('''word_embeddings.weight''', '''word_embeddings'''), ('''position_embeddings.weight''', '''position_embeddings'''), ('''token_type_embeddings.weight''', '''token_type_embeddings'''), ('''.''', '''/'''), ('''LayerNorm/weight''', '''LayerNorm/gamma'''), ('''LayerNorm/bias''', '''LayerNorm/beta'''), ('''weight''', '''kernel'''), ) if not os.path.isdir(lowerCamelCase): os.makedirs(lowerCamelCase) __lowerCAmelCase = model.state_dict() def to_tf_var_name(lowerCamelCase): for patt, repl in iter(lowerCamelCase): __lowerCAmelCase = name.replace(lowerCamelCase, lowerCamelCase) return F"""bert/{name}""" def create_tf_var(lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = tf.dtypes.as_dtype(tensor.dtype) __lowerCAmelCase = tf.get_variable(dtype=lowerCamelCase, shape=tensor.shape, name=lowerCamelCase, initializer=tf.zeros_initializer()) session.run(tf.variables_initializer([tf_var])) session.run(lowerCamelCase) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: __lowerCAmelCase = to_tf_var_name(lowerCamelCase) __lowerCAmelCase = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose): __lowerCAmelCase = torch_tensor.T __lowerCAmelCase = create_tf_var(tensor=lowerCamelCase, name=lowerCamelCase, session=lowerCamelCase) tf.keras.backend.set_value(lowerCamelCase, lowerCamelCase) __lowerCAmelCase = session.run(lowerCamelCase) print(F"""Successfully created {tf_name}: {np.allclose(lowerCamelCase, lowerCamelCase)}""") __lowerCAmelCase = tf.train.Saver(tf.trainable_variables()) saver.save(lowerCamelCase, os.path.join(lowerCamelCase, model_name.replace('''-''', '''_''') + '''.ckpt''')) def __magic_name__( lowerCamelCase=None): __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--model_name''', type=lowerCamelCase, required=lowerCamelCase, help='''model name e.g. bert-base-uncased''') parser.add_argument( '''--cache_dir''', type=lowerCamelCase, default=lowerCamelCase, required=lowerCamelCase, help='''Directory containing pytorch model''') parser.add_argument('''--pytorch_model_path''', type=lowerCamelCase, required=lowerCamelCase, help='''/path/to/<pytorch-model-name>.bin''') parser.add_argument('''--tf_cache_dir''', type=lowerCamelCase, required=lowerCamelCase, help='''Directory in which to save tensorflow model''') __lowerCAmelCase = parser.parse_args(lowerCamelCase) __lowerCAmelCase = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, ) convert_pytorch_checkpoint_to_tf(model=lowerCamelCase, ckpt_dir=args.tf_cache_dir, model_name=args.model_name) if __name__ == "__main__": main()
9
'''simple docstring''' from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split _UpperCAmelCase : List[Any] = datasets.load_iris() _UpperCAmelCase : Dict = np.array(data["""data"""]) _UpperCAmelCase : int = np.array(data["""target"""]) _UpperCAmelCase : str = data["""target_names"""] _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase : Optional[Any] = train_test_split(X, y) def __magic_name__( lowerCamelCase, lowerCamelCase): return np.linalg.norm(np.array(lowerCamelCase) - np.array(lowerCamelCase)) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=5): __lowerCAmelCase = zip(lowerCamelCase, lowerCamelCase) # List of distances of all points from the point to be classified __lowerCAmelCase = [] for data_point in data: __lowerCAmelCase = euclidean_distance(data_point[0], lowerCamelCase) distances.append((distance, data_point[1])) # Choosing 'k' points with the least distances. __lowerCAmelCase = [i[1] for i in sorted(lowerCamelCase)[:k]] # Most commonly occurring class among them # is the class into which the point is classified __lowerCAmelCase = Counter(lowerCamelCase).most_common(1)[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
9
1
'''simple docstring''' import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Dict = {"""vocab_file""": """vocab.txt"""} _UpperCAmelCase : Union[str, Any] = { """vocab_file""": { """facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""", """facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""", }, } _UpperCAmelCase : List[str] = { """facebook/esm2_t6_8M_UR50D""": 1_0_2_4, """facebook/esm2_t12_35M_UR50D""": 1_0_2_4, } def __magic_name__( lowerCamelCase): with open(lowerCamelCase, '''r''') as f: __lowerCAmelCase = f.read().splitlines() return [l.strip() for l in lines] class a__ ( __A ): """simple docstring""" __UpperCamelCase : List[Any] = VOCAB_FILES_NAMES __UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Tuple = ['input_ids', 'attention_mask'] def __init__(self , __lowercase , __lowercase="<unk>" , __lowercase="<cls>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase="<eos>" , **__lowercase , ): super().__init__(**__lowercase ) __lowerCAmelCase = load_vocab_file(__lowercase ) __lowerCAmelCase = dict(enumerate(self.all_tokens ) ) __lowerCAmelCase = {tok: ind for ind, tok in enumerate(self.all_tokens )} __lowerCAmelCase = unk_token __lowerCAmelCase = cls_token __lowerCAmelCase = pad_token __lowerCAmelCase = mask_token __lowerCAmelCase = eos_token __lowerCAmelCase = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def _snake_case (self , __lowercase ): return self._id_to_token.get(__lowercase , self.unk_token ) def _snake_case (self , __lowercase ): return self._token_to_id.get(__lowercase , self._token_to_id.get(self.unk_token ) ) def _snake_case (self , __lowercase , **__lowercase ): return text.split() def _snake_case (self , __lowercase=False ): return len(self._id_to_token ) def _snake_case (self ): return {token: i for i, token in enumerate(self.all_tokens )} def _snake_case (self , __lowercase ): return self._token_to_id.get(__lowercase , self._token_to_id.get(self.unk_token ) ) def _snake_case (self , __lowercase ): return self._id_to_token.get(__lowercase , self.unk_token ) def _snake_case (self , __lowercase , __lowercase = None ): __lowerCAmelCase = [self.cls_token_id] __lowerCAmelCase = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' ) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def _snake_case (self , __lowercase , __lowercase = None , __lowercase = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] __lowerCAmelCase = [1] + ([0] * len(__lowercase )) + [1] if token_ids_a is not None: mask += [0] * len(__lowercase ) + [1] return mask def _snake_case (self , __lowercase , __lowercase ): __lowerCAmelCase = os.path.join(__lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' ) with open(__lowercase , '''w''' ) as f: f.write('''\n'''.join(self.all_tokens ) ) return (vocab_file,) @property def _snake_case (self ): return self.get_vocab_size(with_added_tokens=__lowercase ) def _snake_case (self , __lowercase , __lowercase = False ): return super()._add_tokens(__lowercase , special_tokens=__lowercase )
9
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self ): __lowerCAmelCase = tempfile.mkdtemp() # fmt: off __lowerCAmelCase = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) ) __lowerCAmelCase = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowerCAmelCase = {'''unk_token''': '''<unk>'''} __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowercase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowercase ) ) __lowerCAmelCase = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowerCAmelCase = os.path.join(self.tmpdirname , __lowercase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__lowercase , __lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase ) def _snake_case (self , **__lowercase ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **__lowercase ) def _snake_case (self , **__lowercase ): return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__lowercase ) def _snake_case (self ): shutil.rmtree(self.tmpdirname ) def _snake_case (self ): __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowerCAmelCase = [Image.fromarray(np.moveaxis(__lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _snake_case (self ): __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = self.get_rust_tokenizer() __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_slow.save_pretrained(self.tmpdirname ) __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__lowercase ) __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) processor_fast.save_pretrained(self.tmpdirname ) __lowerCAmelCase = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __lowercase ) self.assertIsInstance(processor_fast.tokenizer , __lowercase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __lowercase ) self.assertIsInstance(processor_fast.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowerCAmelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowerCAmelCase = self.get_image_processor(do_normalize=__lowercase ) __lowerCAmelCase = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__lowercase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowercase ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = image_processor(__lowercase , return_tensors='''np''' ) __lowerCAmelCase = processor(images=__lowercase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = processor(text=__lowercase , return_tensors='''np''' ) __lowerCAmelCase = tokenizer(__lowercase , return_tensors='''np''' ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = '''lower newer''' __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(text=__lowercase , images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = ['''cat''', '''nasa badge'''] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = [['''cat''', '''nasa badge'''], ['''person''']] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 __lowerCAmelCase = len(__lowercase ) __lowerCAmelCase = max([len(__lowercase ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = '''google/owlvit-base-patch32''' __lowerCAmelCase = OwlViTProcessor.from_pretrained(__lowercase ) __lowerCAmelCase = ['''cat''', '''nasa badge'''] __lowerCAmelCase = processor(text=__lowercase ) __lowerCAmelCase = 16 __lowerCAmelCase = inputs['''input_ids'''] __lowerCAmelCase = [ [4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = self.prepare_image_inputs() __lowerCAmelCase = processor(images=__lowercase , query_images=__lowercase ) self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__lowercase ): processor() def _snake_case (self ): __lowerCAmelCase = self.get_image_processor() __lowerCAmelCase = self.get_tokenizer() __lowerCAmelCase = OwlViTProcessor(tokenizer=__lowercase , image_processor=__lowercase ) __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowerCAmelCase = processor.batch_decode(__lowercase ) __lowerCAmelCase = tokenizer.batch_decode(__lowercase ) self.assertListEqual(__lowercase , __lowercase )
9
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCAmelCase : Tuple = { """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""], """tokenization_roberta""": ["""RobertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Any = ["""RobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Dict = [ """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaForCausalLM""", """RobertaForMaskedLM""", """RobertaForMultipleChoice""", """RobertaForQuestionAnswering""", """RobertaForSequenceClassification""", """RobertaForTokenClassification""", """RobertaModel""", """RobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Tuple = [ """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaForCausalLM""", """TFRobertaForMaskedLM""", """TFRobertaForMultipleChoice""", """TFRobertaForQuestionAnswering""", """TFRobertaForSequenceClassification""", """TFRobertaForTokenClassification""", """TFRobertaMainLayer""", """TFRobertaModel""", """TFRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Tuple = [ """FlaxRobertaForCausalLM""", """FlaxRobertaForMaskedLM""", """FlaxRobertaForMultipleChoice""", """FlaxRobertaForQuestionAnswering""", """FlaxRobertaForSequenceClassification""", """FlaxRobertaForTokenClassification""", """FlaxRobertaModel""", """FlaxRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: import sys _UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
9
'''simple docstring''' from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def __magic_name__( ): __lowerCAmelCase = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0)] __lowerCAmelCase = randint(-5_0_0_0, 5_0_0_0) return (arr, r) _UpperCAmelCase : Dict = make_dataset() def __magic_name__( lowerCamelCase, lowerCamelCase): for triplet in permutations(lowerCamelCase, 3): if sum(lowerCamelCase) == target: return tuple(sorted(lowerCamelCase)) return (0, 0, 0) def __magic_name__( lowerCamelCase, lowerCamelCase): arr.sort() __lowerCAmelCase = len(lowerCamelCase) for i in range(n - 1): __lowerCAmelCase , __lowerCAmelCase = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def __magic_name__( ): __lowerCAmelCase = ''' from __main__ import dataset, triplet_sum1, triplet_sum2 ''' __lowerCAmelCase = ''' triplet_sum1(*dataset) ''' __lowerCAmelCase = ''' triplet_sum2(*dataset) ''' __lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0) __lowerCAmelCase = repeat(setup=lowerCamelCase, stmt=lowerCamelCase, repeat=5, number=1_0_0_0_0) return (min(lowerCamelCase), min(lowerCamelCase)) if __name__ == "__main__": from doctest import testmod testmod() _UpperCAmelCase : Union[str, Any] = solution_times() print(f"""The time for naive implementation is {times[0]}.""") print(f"""The time for optimized implementation is {times[1]}.""")
9
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor _UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) class a__ ( __A ): """simple docstring""" def __init__(self , *__lowercase , **__lowercase ): warnings.warn( '''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use DeformableDetrImageProcessor instead.''' , __lowercase , ) super().__init__(*__lowercase , **__lowercase )
9
'''simple docstring''' import numpy as np def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = 1E-12, lowerCamelCase = 1_0_0, ): assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[1] # Ensure proper dimensionality. assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(lowerCamelCase) == np.iscomplexobj(lowerCamelCase) __lowerCAmelCase = np.iscomplexobj(lowerCamelCase) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(lowerCamelCase, input_matrix.conj().T) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. __lowerCAmelCase = False __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = 1E12 while not convergence: # Multiple matrix by the vector. __lowerCAmelCase = np.dot(lowerCamelCase, lowerCamelCase) # Normalize the resulting output vector. __lowerCAmelCase = w / np.linalg.norm(lowerCamelCase) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) __lowerCAmelCase = vector.conj().T if is_complex else vector.T __lowerCAmelCase = np.dot(lowerCamelCase, np.dot(lowerCamelCase, lowerCamelCase)) # Check convergence. __lowerCAmelCase = np.abs(lambda_ - lambda_previous) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: __lowerCAmelCase = True __lowerCAmelCase = lambda_ if is_complex: __lowerCAmelCase = np.real(lambda_) return lambda_, vector def __magic_name__( ): __lowerCAmelCase = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]]) __lowerCAmelCase = np.array([4_1, 4, 2_0]) __lowerCAmelCase = real_input_matrix.astype(np.complexaaa) __lowerCAmelCase = np.triu(1J * complex_input_matrix, 1) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T __lowerCAmelCase = np.array([4_1, 4, 2_0]).astype(np.complexaaa) for problem_type in ["real", "complex"]: if problem_type == "real": __lowerCAmelCase = real_input_matrix __lowerCAmelCase = real_vector elif problem_type == "complex": __lowerCAmelCase = complex_input_matrix __lowerCAmelCase = complex_vector # Our implementation. __lowerCAmelCase , __lowerCAmelCase = power_iteration(lowerCamelCase, lowerCamelCase) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). __lowerCAmelCase , __lowerCAmelCase = np.linalg.eigh(lowerCamelCase) # Last eigenvalue is the maximum one. __lowerCAmelCase = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. __lowerCAmelCase = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max) <= 1E-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(lowerCamelCase) - np.abs(lowerCamelCase)) <= 1E-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
9
1
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging _UpperCAmelCase : Any = logging.get_logger(__name__) class a__ ( __A ): """simple docstring""" __UpperCamelCase : List[Any] = ['audio_values', 'audio_mask'] def __init__(self , __lowercase=20_48 , __lowercase=1 , __lowercase=[16, 16] , __lowercase=1_28 , __lowercase=4_41_00 , __lowercase=86 , __lowercase=20_48 , __lowercase=0.0 , **__lowercase , ): super().__init__( feature_size=__lowercase , sampling_rate=__lowercase , padding_value=__lowercase , **__lowercase , ) __lowerCAmelCase = spectrogram_length __lowerCAmelCase = num_channels __lowerCAmelCase = patch_size __lowerCAmelCase = feature_size // self.patch_size[1] __lowerCAmelCase = n_fft __lowerCAmelCase = sampling_rate // hop_length_to_sampling_rate __lowerCAmelCase = sampling_rate __lowerCAmelCase = padding_value __lowerCAmelCase = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowercase , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=__lowercase , norm='''slaney''' , mel_scale='''slaney''' , ).T def _snake_case (self , __lowercase ): __lowerCAmelCase = spectrogram( __lowercase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=8_0.0 , ) __lowerCAmelCase = log_spec[:, :-1] __lowerCAmelCase = log_spec - 2_0.0 __lowerCAmelCase = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__(self , __lowercase , __lowercase = None , __lowercase = True , __lowercase = None , __lowercase = False , __lowercase = False , **__lowercase , ): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" F""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __lowerCAmelCase = isinstance(__lowercase , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) __lowerCAmelCase = is_batched_numpy or ( isinstance(__lowercase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowerCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__lowercase , np.ndarray ): __lowerCAmelCase = np.asarray(__lowercase , dtype=np.floataa ) elif isinstance(__lowercase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowerCAmelCase = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowerCAmelCase = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __lowerCAmelCase = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __lowercase ): __lowerCAmelCase = [np.asarray(__lowercase , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __lowerCAmelCase = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __lowerCAmelCase = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __lowerCAmelCase = np.array(__lowercase ).astype(np.floataa ) # convert into correct format for padding __lowerCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __lowerCAmelCase = np.ones([len(__lowercase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __lowerCAmelCase = padded_audio_features * self.padding_value for i in range(len(__lowercase ) ): __lowerCAmelCase = audio_features[i] __lowerCAmelCase = feature # return as BatchFeature if return_attention_mask: __lowerCAmelCase = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: __lowerCAmelCase = {'''audio_values''': padded_audio_features} __lowerCAmelCase = BatchFeature(data=__lowercase , tensor_type=__lowercase ) return encoded_inputs
9
'''simple docstring''' from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract _UpperCAmelCase : str = logging.get_logger(__name__) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): return [ int(1_0_0_0 * (box[0] / width)), int(1_0_0_0 * (box[1] / height)), int(1_0_0_0 * (box[2] / width)), int(1_0_0_0 * (box[3] / height)), ] def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = None): __lowerCAmelCase = tesseract_config if tesseract_config is not None else '''''' # apply OCR __lowerCAmelCase = to_pil_image(lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase = pil_image.size __lowerCAmelCase = pytesseract.image_to_data(lowerCamelCase, lang=lowerCamelCase, output_type='''dict''', config=lowerCamelCase) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates __lowerCAmelCase = [idx for idx, word in enumerate(lowerCamelCase) if not word.strip()] __lowerCAmelCase = [word for idx, word in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] __lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format __lowerCAmelCase = [] for x, y, w, h in zip(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = [x, y, x + w, y + h] actual_boxes.append(lowerCamelCase) # finally, normalize the bounding boxes __lowerCAmelCase = [] for box in actual_boxes: normalized_boxes.append(normalize_box(lowerCamelCase, lowerCamelCase, lowerCamelCase)) assert len(lowerCamelCase) == len(lowerCamelCase), "Not as many words as there are bounding boxes" return words, normalized_boxes class a__ ( __A ): """simple docstring""" __UpperCamelCase : str = ['pixel_values'] def __init__(self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = None , __lowercase = "" , **__lowercase , ): super().__init__(**__lowercase ) __lowerCAmelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24} __lowerCAmelCase = get_size_dict(__lowercase ) __lowerCAmelCase = do_resize __lowerCAmelCase = size __lowerCAmelCase = resample __lowerCAmelCase = apply_ocr __lowerCAmelCase = ocr_lang __lowerCAmelCase = tesseract_config def _snake_case (self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ): __lowerCAmelCase = get_size_dict(__lowercase ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) __lowerCAmelCase = (size['''height'''], size['''width''']) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase ) def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ): __lowerCAmelCase = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase = size if size is not None else self.size __lowerCAmelCase = get_size_dict(__lowercase ) __lowerCAmelCase = resample if resample is not None else self.resample __lowerCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr __lowerCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang __lowerCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config __lowerCAmelCase = make_list_of_images(__lowercase ) if not valid_images(__lowercase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) # All transformations expect numpy arrays. __lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images] if apply_ocr: requires_backends(self , '''pytesseract''' ) __lowerCAmelCase = [] __lowerCAmelCase = [] for image in images: __lowerCAmelCase , __lowerCAmelCase = apply_tesseract(__lowercase , __lowercase , __lowercase ) words_batch.append(__lowercase ) boxes_batch.append(__lowercase ) if do_resize: __lowerCAmelCase = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) __lowerCAmelCase = [flip_channel_order(__lowercase ) for image in images] __lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images] __lowerCAmelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase ) if apply_ocr: __lowerCAmelCase = words_batch __lowerCAmelCase = boxes_batch return data
9
1
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class a__ ( __A ): """simple docstring""" __UpperCamelCase : Dict = ['image_processor', 'tokenizer'] __UpperCamelCase : Optional[int] = 'LayoutLMv2ImageProcessor' __UpperCamelCase : List[Any] = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast') def __init__(self , __lowercase=None , __lowercase=None , **__lowercase ): if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __lowercase , ) __lowerCAmelCase = kwargs.pop('''feature_extractor''' ) __lowerCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__lowercase , __lowercase ) def __call__(self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = True , __lowercase = False , __lowercase = None , __lowercase = None , __lowercase = 0 , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = False , __lowercase = False , __lowercase = False , __lowercase = False , __lowercase = True , __lowercase = None , **__lowercase , ): # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes ''' '''if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' ) # first, apply the image processor __lowerCAmelCase = self.image_processor(images=__lowercase , return_tensors=__lowercase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__lowercase , __lowercase ): __lowerCAmelCase = [text] # add batch dimension (as the image processor always adds a batch dimension) __lowerCAmelCase = features['''words'''] __lowerCAmelCase = self.tokenizer( text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__lowercase , add_special_tokens=__lowercase , padding=__lowercase , truncation=__lowercase , max_length=__lowercase , stride=__lowercase , pad_to_multiple_of=__lowercase , return_token_type_ids=__lowercase , return_attention_mask=__lowercase , return_overflowing_tokens=__lowercase , return_special_tokens_mask=__lowercase , return_offsets_mapping=__lowercase , return_length=__lowercase , verbose=__lowercase , return_tensors=__lowercase , **__lowercase , ) # add pixel values __lowerCAmelCase = features.pop('''pixel_values''' ) if return_overflowing_tokens is True: __lowerCAmelCase = self.get_overflowing_images(__lowercase , encoded_inputs['''overflow_to_sample_mapping'''] ) __lowerCAmelCase = images return encoded_inputs def _snake_case (self , __lowercase , __lowercase ): # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image __lowerCAmelCase = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__lowercase ) != len(__lowercase ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' F""" {len(__lowercase )} and {len(__lowercase )}""" ) return images_with_overflow def _snake_case (self , *__lowercase , **__lowercase ): return self.tokenizer.batch_decode(*__lowercase , **__lowercase ) def _snake_case (self , *__lowercase , **__lowercase ): return self.tokenizer.decode(*__lowercase , **__lowercase ) @property def _snake_case (self ): return ["input_ids", "bbox", "attention_mask", "image"] @property def _snake_case (self ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowercase , ) return self.image_processor_class @property def _snake_case (self ): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowercase , ) return self.image_processor
9
'''simple docstring''' from ..utils import DummyObject, requires_backends class a__ ( metaclass=__A ): """simple docstring""" __UpperCamelCase : int = ['torch', 'scipy'] def __init__(self , *__lowercase , **__lowercase ): requires_backends(self , ['''torch''', '''scipy'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''torch''', '''scipy'''] ) @classmethod def _snake_case (cls , *__lowercase , **__lowercase ): requires_backends(cls , ['''torch''', '''scipy'''] )
9
1
'''simple docstring''' import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def __magic_name__( lowerCamelCase, lowerCamelCase=False): __lowerCAmelCase = OmegaConf.load(lowerCamelCase) if display: print(yaml.dump(OmegaConf.to_container(lowerCamelCase))) return config def __magic_name__( lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None): if conf_path is None: __lowerCAmelCase = '''./model_checkpoints/vqgan_only.yaml''' __lowerCAmelCase = load_config(lowerCamelCase, display=lowerCamelCase) __lowerCAmelCase = VQModel(**config.model.params) if ckpt_path is None: __lowerCAmelCase = '''./model_checkpoints/vqgan_only.pt''' __lowerCAmelCase = torch.load(lowerCamelCase, map_location=lowerCamelCase) if ".ckpt" in ckpt_path: __lowerCAmelCase = sd['''state_dict'''] model.load_state_dict(lowerCamelCase, strict=lowerCamelCase) model.to(lowerCamelCase) del sd return model def __magic_name__( lowerCamelCase, lowerCamelCase): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.encode(lowerCamelCase) print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""") __lowerCAmelCase = model.decode(lowerCamelCase) return xrec def __magic_name__( lowerCamelCase, lowerCamelCase=False): __lowerCAmelCase , __lowerCAmelCase = string.rsplit('''.''', 1) if reload: __lowerCAmelCase = importlib.import_module(lowerCamelCase) importlib.reload(lowerCamelCase) return getattr(importlib.import_module(lowerCamelCase, package=lowerCamelCase), cls) def __magic_name__( lowerCamelCase): if "target" not in config: raise KeyError('''Expected key `target` to instantiate.''') return get_obj_from_str(config['''target'''])(**config.get('''params''', {})) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=True, lowerCamelCase=True): __lowerCAmelCase = instantiate_from_config(lowerCamelCase) if sd is not None: model.load_state_dict(lowerCamelCase) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): # load the specified checkpoint if ckpt: __lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''') __lowerCAmelCase = pl_sd['''global_step'''] print(F"""loaded model from global step {global_step}.""") else: __lowerCAmelCase = {'''state_dict''': None} __lowerCAmelCase = None __lowerCAmelCase = load_model_from_config(config.model, pl_sd['''state_dict'''], gpu=lowerCamelCase, eval_mode=lowerCamelCase)['''model'''] return model, global_step
9
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class a__ ( unittest.TestCase ): """simple docstring""" def __init__(self , __lowercase , __lowercase = True , __lowercase = None , __lowercase = 32 , __lowercase = True , __lowercase = 1 / 2_55 , __lowercase = True , __lowercase = True , __lowercase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , __lowercase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , __lowercase = True , __lowercase=7 , __lowercase=30 , __lowercase=4_00 , __lowercase=3 , ): __lowerCAmelCase = parent __lowerCAmelCase = do_resize __lowerCAmelCase = size if size is not None else {'''shortest_edge''': 2_88} __lowerCAmelCase = size_divisor __lowerCAmelCase = do_rescale __lowerCAmelCase = rescale_factor __lowerCAmelCase = do_normalize __lowerCAmelCase = do_center_crop __lowerCAmelCase = image_mean __lowerCAmelCase = image_std __lowerCAmelCase = do_pad __lowerCAmelCase = batch_size __lowerCAmelCase = num_channels __lowerCAmelCase = min_resolution __lowerCAmelCase = max_resolution def _snake_case (self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _snake_case (self , __lowercase , __lowercase=False ): if not batched: __lowerCAmelCase = self.size['''shortest_edge'''] __lowerCAmelCase = image_inputs[0] if isinstance(__lowercase , Image.Image ): __lowerCAmelCase , __lowerCAmelCase = image.size else: __lowerCAmelCase , __lowerCAmelCase = image.shape[1], image.shape[2] __lowerCAmelCase = size / min(__lowercase , __lowercase ) if h < w: __lowerCAmelCase , __lowerCAmelCase = size, scale * w else: __lowerCAmelCase , __lowerCAmelCase = scale * h, size __lowerCAmelCase = int((13_33 / 8_00) * size ) if max(__lowercase , __lowercase ) > max_size: __lowerCAmelCase = max_size / max(__lowercase , __lowercase ) __lowerCAmelCase = newh * scale __lowerCAmelCase = neww * scale __lowerCAmelCase , __lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 ) __lowerCAmelCase , __lowerCAmelCase = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __lowerCAmelCase = [] for image in image_inputs: __lowerCAmelCase , __lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[0] )[0] __lowerCAmelCase = max(__lowercase , key=lambda __lowercase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a__ ( __A , unittest.TestCase ): """simple docstring""" __UpperCamelCase : Any = BridgeTowerImageProcessor if is_vision_available() else None def _snake_case (self ): __lowerCAmelCase = BridgeTowerImageProcessingTester(self ) @property def _snake_case (self ): return self.image_processor_tester.prepare_image_processor_dict() def _snake_case (self ): __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowercase , '''image_mean''' ) ) self.assertTrue(hasattr(__lowercase , '''image_std''' ) ) self.assertTrue(hasattr(__lowercase , '''do_normalize''' ) ) self.assertTrue(hasattr(__lowercase , '''do_resize''' ) ) self.assertTrue(hasattr(__lowercase , '''size''' ) ) self.assertTrue(hasattr(__lowercase , '''size_divisor''' ) ) def _snake_case (self ): pass def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , Image.Image ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , numpify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , np.ndarray ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _snake_case (self ): # Initialize image processor __lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowercase , torchify=__lowercase ) for image in image_inputs: self.assertIsInstance(__lowercase , torch.Tensor ) # Test not batched input __lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase = image_processing(__lowercase , return_tensors='''pt''' ).pixel_values __lowerCAmelCase , __lowerCAmelCase = self.image_processor_tester.get_expected_values(__lowercase , batched=__lowercase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
9
1