code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" def a__ ( SCREAMING_SNAKE_CASE : int = 5_0 ): '''simple docstring''' lowerCAmelCase : List[Any] = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"{solution() = }")
108
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
315
0
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) class snake_case__(__lowerCAmelCase ): """simple docstring""" lowercase_ = '''timm_backbone''' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[Any]=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ): super().__init__(**_UpperCAmelCase ) lowercase__ : str = backbone lowercase__ : Optional[int] = num_channels lowercase__ : Tuple = features_only lowercase__ : str = use_pretrained_backbone lowercase__ : Tuple = True lowercase__ : Union[str, Any] = out_indices if out_indices is not None else (-1,)
130
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''xlnet''' UpperCAmelCase : List[Any] = ['''mems'''] UpperCAmelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ): _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) _A = kwargs['use_cache'] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
315
0
import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __lowerCamelCase : Dict = datasets.logging.get_logger(__name__) __lowerCamelCase : List[Any] = """\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } """ __lowerCamelCase : Dict = """\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project\'s README at https://github.com/google-research/bleurt#readme for more information. """ __lowerCamelCase : Optional[Any] = """ BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: \'scores\': List of scores. Examples: >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> bleurt = datasets.load_metric(\"bleurt\") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results[\"scores\"]]) [1.03, 1.04] """ __lowerCamelCase : int = { """bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""", """bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""", """bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""", """bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""", """bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""", """bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""", """BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""", """BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""", """BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""", """BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , ) def __UpperCamelCase( self , A_ ): '''simple docstring''' if self.config_name == "default": logger.warning( "Using default BLEURT-Base checkpoint for sequence maximum length 128. " "You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\')." ) UpperCamelCase : Any = "bleurt-base-128" if self.config_name.lower() in CHECKPOINT_URLS: UpperCamelCase : Optional[int] = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: UpperCamelCase : Tuple = self.config_name.upper() else: raise KeyError( F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" ) # download the model checkpoint specified by self.config_name and set up the scorer UpperCamelCase : Union[str, Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) UpperCamelCase : List[Any] = score.BleurtScorer(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.scorer.score(references=_UpperCAmelCase , candidates=_UpperCAmelCase ) return {"scores": scores}
52
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' ) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_snake_case , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' ) _A = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_snake_case ) , _snake_case , indent=4 ) git_log(args.dump_path ) _A , _A , _A = MODEL_CLASSES[args.student_type] _A , _A , _A = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A = tokenizer.all_special_tokens.index(_snake_case ) _A = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) _A = special_tok_ids _A = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: _A = pickle.load(_snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: _A = pickle.load(_snake_case ) _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A = 0.0 # do not predict special tokens _A = torch.from_numpy(_snake_case ) else: _A = None _A = LmSeqsDataset(params=_snake_case , data=_snake_case ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) _A = student_config_class.from_pretrained(args.student_config ) _A = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case ) else: _A = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' ) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
315
0
"""simple docstring""" import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets UpperCamelCase_ = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n' UpperCamelCase_ = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n' UpperCamelCase_ = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n `"rougeL"`: Longest common subsequence based scoring.\n `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results["rouge1"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results["rouge1"].mid.fmeasure)\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case ( datasets.Metric ): def UpperCAmelCase__ ( self) ->List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence"), "references": datasets.Value("string" , id="sequence"), }) , codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"] , reference_urls=[ "https://en.wikipedia.org/wiki/ROUGE_(metric)", "https://github.com/google-research/google-research/tree/master/rouge", ] , ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False) ->Union[str, Any]: if rouge_types is None: a_ = ["rouge1", "rouge2", "rougeL", "rougeLsum"] a_ = rouge_scorer.RougeScorer(rouge_types=_UpperCAmelCase , use_stemmer=_UpperCAmelCase) if use_aggregator: a_ = scoring.BootstrapAggregator() else: a_ = [] for ref, pred in zip(_UpperCAmelCase , _UpperCAmelCase): a_ = scorer.score(_UpperCAmelCase , _UpperCAmelCase) if use_aggregator: aggregator.add_scores(_UpperCAmelCase) else: scores.append(_UpperCAmelCase) if use_aggregator: a_ = aggregator.aggregate() else: a_ = {} for key in scores[0]: a_ = [score[key] for score in scores] return result
243
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
315
0
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _snake_case = logging.get_logger(__name__) _snake_case = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} _snake_case = { "tokenizer_file": { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json", }, } _snake_case = { "gpt-neox-20b": 2048, } class lowercase ( __lowerCAmelCase ): _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = ['''input_ids''', '''attention_mask'''] def __init__( self , _a=None , _a=None , _a=None , _a="<|endoftext|>" , _a="<|endoftext|>" , _a="<|endoftext|>" , _a=False , **_a , ) -> List[str]: super().__init__( _UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , ) _A : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , _UpperCAmelCase ) != add_prefix_space: _A : List[Any] = getattr(_UpperCAmelCase , pre_tok_state.pop("""type""" ) ) _A : Optional[int] = add_prefix_space _A : Dict = pre_tok_class(**_UpperCAmelCase ) _A : Union[str, Any] = add_prefix_space def a__ ( self , _a , _a = None ) -> Tuple: _A : List[str] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase ) return tuple(_UpperCAmelCase ) def a__ ( self , _a ) -> Tuple: _A : Tuple = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) + [self.eos_token_id] ) if len(_UpperCAmelCase ) > self.model_max_length: _A : str = input_ids[-self.model_max_length :] return input_ids
26
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
315
0
import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class snake_case__ (unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE__( self ) -> List[Any]: """simple docstring""" a__ : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=_UpperCAmelCase ).to(_UpperCAmelCase ) a__ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" ) a__ : Any = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids a__ : str = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids a__ : Optional[Any] = model(input_ids.to(_UpperCAmelCase ) , labels=labels.to(_UpperCAmelCase ) ).loss a__ : Optional[int] = -(labels.shape[-1] * loss.item()) a__ : str = -8_4.9_1_2_7 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
170
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
315
0
'''simple docstring''' UpperCAmelCase = [ '''Audio''', '''Array2D''', '''Array3D''', '''Array4D''', '''Array5D''', '''ClassLabel''', '''Features''', '''Sequence''', '''Value''', '''Image''', '''Translation''', '''TranslationVariableLanguages''', ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
141
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
315
0
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class _A : _UpperCamelCase : int = MBartConfig _UpperCamelCase : Any = {} _UpperCamelCase : Tuple = '''gelu''' def __init__( self : Optional[Any] , _A : str , _A : List[Any]=13 , _A : List[str]=7 , _A : Optional[Any]=True , _A : Tuple=False , _A : int=99 , _A : Union[str, Any]=32 , _A : Dict=2 , _A : List[str]=4 , _A : Union[str, Any]=37 , _A : str=0.1 , _A : Any=0.1 , _A : List[str]=20 , _A : Any=2 , _A : Optional[int]=1 , _A : Any=0 , ) -> List[str]: """simple docstring""" lowercase : str = parent lowercase : Any = batch_size lowercase : Optional[Any] = seq_length lowercase : List[Any] = is_training lowercase : Any = use_labels lowercase : str = vocab_size lowercase : Any = hidden_size lowercase : List[Any] = num_hidden_layers lowercase : int = num_attention_heads lowercase : List[Any] = intermediate_size lowercase : Dict = hidden_dropout_prob lowercase : List[Any] = attention_probs_dropout_prob lowercase : Optional[int] = max_position_embeddings lowercase : List[str] = eos_token_id lowercase : Optional[int] = pad_token_id lowercase : Any = bos_token_id def __a ( self : Tuple ) -> int: """simple docstring""" lowercase : int = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowercase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowercase : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[int] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase : Union[str, Any] = prepare_mbart_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def __a ( self : Optional[int] , _A : Optional[Any] , _A : int ) -> Tuple: """simple docstring""" lowercase : Optional[Any] = TFMBartModel(config=_UpperCAmelCase ).get_decoder() lowercase : List[str] = inputs_dict['''input_ids'''] lowercase : Dict = input_ids[:1, :] lowercase : List[str] = inputs_dict['''attention_mask'''][:1, :] lowercase : int = inputs_dict['''head_mask'''] lowercase : List[Any] = 1 # first forward pass lowercase : Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) lowercase , lowercase : Optional[Any] = outputs.to_tuple() lowercase : Optional[Any] = past_key_values[1] def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , __magic_name__=None , ) -> Tuple: '''simple docstring''' if attention_mask is None: lowercase : Tuple = tf.cast(tf.math.not_equal(_snake_case , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowercase : Optional[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowercase : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowercase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowercase : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _A ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): _UpperCamelCase : Union[str, Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () _UpperCamelCase : Union[str, Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else () _UpperCamelCase : Tuple = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) _UpperCamelCase : Optional[int] = True _UpperCamelCase : Union[str, Any] = False _UpperCamelCase : Union[str, Any] = False def __a ( self : Tuple , _A : List[str] , _A : Union[str, Any] , _A : Optional[Any] , _A : Optional[int] , _A : Union[str, Any] ) -> int: """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def __a ( self : str ) -> List[str]: """simple docstring""" lowercase : Tuple = TFMBartModelTester(self ) lowercase : Optional[int] = ConfigTester(self , config_class=_UpperCAmelCase ) def __a ( self : List[str] ) -> str: """simple docstring""" self.config_tester.run_common_tests() def __a ( self : Dict ) -> List[str]: """simple docstring""" lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class _A ( unittest.TestCase ): _UpperCamelCase : Optional[int] = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] _UpperCamelCase : str = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] _UpperCamelCase : Dict = '''facebook/mbart-large-en-ro''' @cached_property def __a ( self : List[str] ) -> Tuple: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __a ( self : List[str] ) -> str: """simple docstring""" lowercase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __a ( self : Union[str, Any] , **_A : Any ) -> Tuple: """simple docstring""" lowercase : Union[str, Any] = self.translate_src_text(**_UpperCAmelCase ) self.assertListEqual(self.expected_text , _UpperCAmelCase ) def __a ( self : Union[str, Any] , **_A : List[Any] ) -> Dict: """simple docstring""" lowercase : List[Any] = self.tokenizer(self.src_text , **_UpperCAmelCase , return_tensors='''tf''' ) lowercase : Optional[Any] = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) lowercase : Dict = self.tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) return generated_words @slow def __a ( self : Any ) -> Dict: """simple docstring""" self._assert_generated_batch_equal_expected()
308
"""simple docstring""" def _snake_case ( _snake_case : int = 10_00 ) -> int: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
315
0
UpperCAmelCase : List[str] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n" UpperCAmelCase : str = [{"type": "code", "content": INSTALL_CONTENT}] UpperCAmelCase : Any = { "{processor_class}": "FakeProcessorClass", "{model_class}": "FakeModelClass", "{object_class}": "FakeObjectClass", }
252
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
315
0
import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE_ (self : Any) ->str: '''simple docstring''' lowerCamelCase__: Dict =tempfile.mkdtemp() # fmt: off lowerCamelCase__: Optional[int] =["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"] # fmt: on lowerCamelCase__: Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) lowerCamelCase__: Optional[int] ={ "do_resize": True, "size": {"height": 18, "width": 18}, "do_normalize": True, "image_mean": [0.5, 0.5, 0.5], "image_std": [0.5, 0.5, 0.5], } lowerCamelCase__: Optional[int] =os.path.join(self.tmpdirname , _UpperCAmelCase) with open(self.image_processor_file , "w" , encoding="utf-8") as fp: json.dump(_UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : List[Any] , **UpperCAmelCase_ : List[str]) ->Tuple: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : List[Any] , **UpperCAmelCase_ : Tuple) ->str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->List[str]: '''simple docstring''' shutil.rmtree(self.tmpdirname) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple: '''simple docstring''' lowerCamelCase__: List[Any] =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] lowerCamelCase__: str =[Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1)) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Any =self.get_tokenizer() lowerCamelCase__: Optional[Any] =self.get_image_processor() lowerCamelCase__: Dict =VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) processor.save_pretrained(self.tmpdirname) lowerCamelCase__: str =VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast)) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor.image_processor , _UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->str: '''simple docstring''' lowerCamelCase__: Dict =VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) lowerCamelCase__: Optional[int] =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)") lowerCamelCase__: Dict =self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0) lowerCamelCase__: Any =VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_UpperCAmelCase , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast)) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , _UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]: '''simple docstring''' lowerCamelCase__: Dict =self.get_image_processor() lowerCamelCase__: List[str] =self.get_tokenizer() lowerCamelCase__: int =VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) lowerCamelCase__: Union[str, Any] =self.prepare_image_inputs() lowerCamelCase__: str =image_processor(_UpperCAmelCase , return_tensors="np") lowerCamelCase__: List[str] =processor(images=_UpperCAmelCase , return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2) def SCREAMING_SNAKE_CASE_ (self : str) ->int: '''simple docstring''' lowerCamelCase__: Union[str, Any] =self.get_image_processor() lowerCamelCase__: List[Any] =self.get_tokenizer() lowerCamelCase__: str =VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) lowerCamelCase__: Optional[Any] ="lower newer" lowerCamelCase__: List[str] =processor(text=_UpperCAmelCase) lowerCamelCase__: List[str] =tokenizer(_UpperCAmelCase) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]: '''simple docstring''' lowerCamelCase__: str =self.get_image_processor() lowerCamelCase__: List[str] =self.get_tokenizer() lowerCamelCase__: Optional[Any] =VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) lowerCamelCase__: str ="lower newer" lowerCamelCase__: int =self.prepare_image_inputs() lowerCamelCase__: int =processor(text=_UpperCAmelCase , images=_UpperCAmelCase) self.assertListEqual(list(inputs.keys()) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with self.assertRaises(_UpperCAmelCase): processor() def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Dict: '''simple docstring''' lowerCamelCase__: Dict =self.get_image_processor() lowerCamelCase__: Any =self.get_tokenizer() lowerCamelCase__: List[Any] =VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) lowerCamelCase__: int =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase__: Optional[int] =processor.batch_decode(_UpperCAmelCase) lowerCamelCase__: Dict =tokenizer.batch_decode(_UpperCAmelCase) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : Dict) ->str: '''simple docstring''' lowerCamelCase__: str =self.get_image_processor() lowerCamelCase__: Optional[int] =self.get_tokenizer() lowerCamelCase__: Dict =VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase) lowerCamelCase__: str ="lower newer" lowerCamelCase__: Optional[Any] =self.prepare_image_inputs() lowerCamelCase__: List[str] =processor(text=_UpperCAmelCase , images=_UpperCAmelCase) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
10
"""simple docstring""" import numpy class lowercase_ : '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ): _A = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _A = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _A = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _A = numpy.random.rand(3 , 1 ) # Real output values provided. _A = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _A = numpy.zeros(output_array.shape ) def lowerCAmelCase_ ( self : List[str] ): _A = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowerCAmelCase_ ( self : Optional[int] ): _A = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _A = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _A = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ): for iteration in range(1 , iterations + 1 ): _A = self.feedforward() self.back_propagation() if give_loss: _A = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ): _A = input_arr _A = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return 1 / (1 + numpy.exp(-value )) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return (value) * (1 - (value)) def _snake_case ( ) -> int: '''simple docstring''' _A = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _A = TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
315
0
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : List[Any] = logging.get_logger() @dataclass class __magic_name__ : A: nn.Module A: List[nn.Module] = field(default_factory=__lowerCAmelCase) A: list = field(default_factory=__lowerCAmelCase) def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tensor , lowerCamelCase__ : Tensor ) -> Any: '''simple docstring''' UpperCamelCase__ : str = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad ) if has_not_submodules: self.traced.append(_UpperCAmelCase ) def __call__( self : List[str] , lowerCamelCase__ : Tensor ) -> str: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(_UpperCAmelCase ) [x.remove() for x in self.handles] return self @property def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' return list(filter(lambda lowerCamelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class __magic_name__ : A: nn.Module A: nn.Module A: int = 0 A: List = field(default_factory=__lowerCAmelCase) A: List = field(default_factory=__lowerCAmelCase) def __call__( self : List[str] , lowerCamelCase__ : Tensor ) -> Tuple: '''simple docstring''' UpperCamelCase__ : List[Any] = Tracker(self.dest )(_UpperCAmelCase ).parametrized UpperCamelCase__ : Optional[Any] = Tracker(self.src )(_UpperCAmelCase ).parametrized UpperCamelCase__ : Any = list(filter(lambda lowerCamelCase__ : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) ) UpperCamelCase__ : List[str] = list(filter(lambda lowerCamelCase__ : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise Exception( F"Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while" F" destination module has {len(_UpperCAmelCase )}." ) for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F"Transfered from={src_m} to={dest_m}" ) def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : ResNetConfig , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : bool = True ): """simple docstring""" print(F"Converting {name}..." ) with torch.no_grad(): UpperCamelCase__ : str = timm.create_model(_snake_case , pretrained=_snake_case ).eval() UpperCamelCase__ : List[Any] = ResNetForImageClassification(_snake_case ).eval() UpperCamelCase__ : Dict = ModuleTransfer(src=_snake_case , dest=_snake_case ) UpperCamelCase__ : Any = torch.randn((1, 3, 224, 224) ) module_transfer(_snake_case ) assert torch.allclose(from_model(_snake_case ) , our_model(_snake_case ).logits ), "The model logits don't match the original one." UpperCamelCase__ : str = F"resnet{'-'.join(name.split('resnet' ) )}" print(_snake_case ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=_snake_case , ) # we can use the convnext one UpperCamelCase__ : int = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=_snake_case , ) print(F"Pushed {checkpoint_name}" ) def _a ( SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : str = None , SCREAMING_SNAKE_CASE : bool = True ): """simple docstring""" UpperCamelCase__ : List[str] = '''imagenet-1k-id2label.json''' UpperCamelCase__ : Optional[int] = 1000 UpperCamelCase__ : List[str] = (1, num_labels) UpperCamelCase__ : Union[str, Any] = '''huggingface/label-files''' UpperCamelCase__ : List[str] = num_labels UpperCamelCase__ : Dict = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) ) UpperCamelCase__ : Optional[Any] = {int(_snake_case ): v for k, v in idalabel.items()} UpperCamelCase__ : Union[str, Any] = idalabel UpperCamelCase__ : Union[str, Any] = {v: k for k, v in idalabel.items()} UpperCamelCase__ : Any = partial(_snake_case , num_labels=_snake_case , idalabel=_snake_case , labelaid=_snake_case ) UpperCamelCase__ : List[Any] = { '''resnet18''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ), '''resnet26''': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), '''resnet34''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ), '''resnet50''': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), '''resnet101''': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), '''resnet152''': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ), } if model_name: convert_weight_and_push(_snake_case , names_to_config[model_name] , _snake_case , _snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(_snake_case , _snake_case , _snake_case , _snake_case ) return config, expected_shape if __name__ == "__main__": __UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported resnet* architecture," " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) __UpperCamelCase : Union[str, Any] = parser.parse_args() __UpperCamelCase : str = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
146
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
315
0
"""simple docstring""" def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' while b: lowerCAmelCase , lowerCAmelCase : List[Any] = b, a % b return a def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return a if b == 0 else euclidean_gcd_recursive(_snake_case , a % b ) def a__ ( ): '''simple docstring''' print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" ) print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" ) print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" ) print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" ) print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" ) print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" ) print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" ) print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" ) if __name__ == "__main__": main()
108
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
315
0
lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" lowercase__ : List[Any] = set() # keep track of all the paths to be checked lowercase__ : Dict = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue lowercase__ : str = queue.pop(0 ) # get the last node from the path lowercase__ : Any = path[-1] if node not in explored: lowercase__ : Any = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: lowercase__ : Dict = list(_snake_case ) new_path.append(_snake_case ) queue.append(_snake_case ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(_snake_case ) # in case there's no path between the 2 nodes return [] def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 lowercase__ : Any = [start] lowercase__ : Tuple = set(_snake_case ) # Keep tab on distances from `start` node. lowercase__ : Any = {start: 0, target: -1} while queue: lowercase__ : str = queue.pop(0 ) if node == target: lowercase__ : List[Any] = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(_snake_case ) queue.append(_snake_case ) lowercase__ : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
130
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(sorted(_snake_case ) ) def _snake_case ( _snake_case : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(_snake_case )] a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') a = sorted({word.strip().lower() for word in data.splitlines()}) a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
315
0
def A_ ( _lowerCAmelCase ) -> int: UpperCamelCase : Union[str, Any] = [[0 for _ in range(_snake_case )] for _ in range(m + 1 )] for i in range(m + 1 ): UpperCamelCase : Optional[Any] = 1 for n in range(m + 1 ): for k in range(1 , _snake_case ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: __lowerCamelCase : Any = int(input("""Enter a number: """).strip()) print(partition(n)) except ValueError: print("""Please enter a number.""") else: try: __lowerCamelCase : Optional[Any] = int(sys.argv[1]) print(partition(n)) except ValueError: print("""Please pass a number.""")
52
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase_ ( self : Dict ): _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCAmelCase : float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ) -> List[str]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: _A = ds['train'].train_test_split(data_args.train_val_split ) _A = split['train'] _A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTImageProcessor() # create model if model_args.model_name_or_path: _A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _A = ViTMAEForPreTraining(_snake_case ) if training_args.do_train: _A = ds['train'].column_names else: _A = ds['validation'].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = 'image' elif "img" in column_names: _A = 'img' else: _A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _A = image_processor.size['shortest_edge'] else: _A = (image_processor.size['height'], image_processor.size['width']) _A = Compose( [ Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_snake_case : List[Any] ): _A = [transforms(_snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Compute absolute learning rate _A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _A = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _A = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('eval' , _snake_case ) trainer.save_metrics('eval' , _snake_case ) # Write model card and (optionally) push to hub _A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
315
0
"""simple docstring""" import requests from bsa import BeautifulSoup def UpperCamelCase ( UpperCAmelCase = "https://www.worldometers.info/coronavirus" ) ->dict: """simple docstring""" a_ = BeautifulSoup(requests.get(_snake_case ).text , "html.parser" ) a_ = soup.findAll("h1" ) a_ = soup.findAll("div" , {"class": "maincounter-number"} ) keys += soup.findAll("span" , {"class": "panel-title"} ) values += soup.findAll("div" , {"class": "number-table-main"} ) return {key.text.strip(): value.text.strip() for key, value in zip(_snake_case , _snake_case )} if __name__ == "__main__": print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n') for key, value in world_covidaa_stats().items(): print(F"""{key}\n{value}\n""")
243
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a = logging.getLogger(__name__) a = 50 # max width of layer names a = 70 # max width of quantizer names def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if args.calibrator == "max": _A = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _A = 'histogram' elif args.calibrator == "mse": _A = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]: '''simple docstring''' logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case ) if args.quant_disable: set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case ) if args.quant_disable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=_snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case ) if args.recalibrate_weights: recalibrate_weights(_snake_case ) if args.fuse_qkv: fuse_qkv(_snake_case , _snake_case ) if args.clip_gelu: clip_gelu(_snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str ) -> Any: '''simple docstring''' logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_snake_case , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(_snake_case , _snake_case , _snake_case ) qq._amax.fill_(_snake_case ) qk._amax.fill_(_snake_case ) qv._amax.fill_(_snake_case ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _snake_case ( _snake_case : List[str] ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _snake_case ( _snake_case : Dict ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(_snake_case , _snake_case ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(_snake_case , 'weight' ): continue _A = max(_snake_case , len(_snake_case ) ) for name, mod in model.named_modules(): _A = getattr(_snake_case , '_input_quantizer' , _snake_case ) _A = getattr(_snake_case , '_weight_quantizer' , _snake_case ) if not hasattr(_snake_case , 'weight' ): continue if type(_snake_case ) in ignore: continue if [True for s in ignore if type(_snake_case ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(_snake_case ) <= line_width: logger.info(_snake_case ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = getattr(_snake_case , _snake_case , _snake_case ) if quantizer_mod is not None: assert hasattr(_snake_case , _snake_case ) setattr(_snake_case , _snake_case , _snake_case ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case ) if which in ["weight", "both"]: set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case ) logger.info(_snake_case ) def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): set_quantizers(_snake_case , _snake_case , **_snake_case ) elif name.endswith('_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(_snake_case , _snake_case , _snake_case ) logger.info(_snake_case )
315
0
import functools from typing import Any def lowerCAmelCase_ ( snake_case_,snake_case_ ): if not isinstance(_snake_case,_snake_case ) or len(_snake_case ) == 0: raise ValueError("""the string should be not empty string""" ) if not isinstance(_snake_case,_snake_case ) or not all( isinstance(_snake_case,_snake_case ) and len(_snake_case ) > 0 for item in words ): raise ValueError("""the words should be a list of non-empty strings""" ) # Build trie _A : int = {} _A : List[Any] = """WORD_KEEPER""" for word in words: _A : int = trie for c in word: if c not in trie_node: _A : str = {} _A : int = trie_node[c] _A : List[str] = True _A : Union[str, Any] = len(_snake_case ) # Dynamic programming method @functools.cache def is_breakable(snake_case_ ) -> bool: if index == len_string: return True _A : Optional[Any] = trie for i in range(_snake_case,_snake_case ): _A : str = trie_node.get(string[i],_snake_case ) if trie_node is None: return False if trie_node.get(_snake_case,_snake_case ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
26
"""simple docstring""" from scipy.stats import spearmanr import datasets a = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' a = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' a = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ): _A = spearmanr(_UpperCAmelCase , _UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
315
0
from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class snake_case__ (__lowerCAmelCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ : str = SMALL_MODEL_IDENTIFIER a__ : int = """pt""" a__ : List[str] = """tf""" def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> Tuple: """simple docstring""" a__ : Union[str, Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> int: """simple docstring""" a__ : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=_UpperCAmelCase ) model_tf.save_pretrained(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]: """simple docstring""" a__ : List[str] = """mock_framework""" # Framework provided - return whatever the user provides a__ : List[Any] = FeaturesManager.determine_framework(self.test_model , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_UpperCAmelCase ) a__ : Dict = FeaturesManager.determine_framework(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_UpperCAmelCase ) a__ : Optional[int] = FeaturesManager.determine_framework(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__( self ) -> Tuple: """simple docstring""" with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(_UpperCAmelCase ) a__ : Any = FeaturesManager.determine_framework(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(_UpperCAmelCase ) a__ : Union[str, Any] = FeaturesManager.determine_framework(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(_UpperCAmelCase ): a__ : int = FeaturesManager.determine_framework(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__( self ) -> Tuple: """simple docstring""" a__ : int = MagicMock(return_value=_UpperCAmelCase ) with patch("""transformers.onnx.features.is_tf_available""" , _UpperCAmelCase ): a__ : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_UpperCAmelCase , self.framework_pt ) # PyTorch not in environment -> use TensorFlow a__ : Any = MagicMock(return_value=_UpperCAmelCase ) with patch("""transformers.onnx.features.is_torch_available""" , _UpperCAmelCase ): a__ : Optional[Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_UpperCAmelCase , self.framework_tf ) # Both in environment -> use PyTorch a__ : str = MagicMock(return_value=_UpperCAmelCase ) a__ : Optional[int] = MagicMock(return_value=_UpperCAmelCase ) with patch("""transformers.onnx.features.is_tf_available""" , _UpperCAmelCase ), patch( """transformers.onnx.features.is_torch_available""" , _UpperCAmelCase ): a__ : Any = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(_UpperCAmelCase , self.framework_pt ) # Both not in environment -> raise error a__ : Any = MagicMock(return_value=_UpperCAmelCase ) a__ : Tuple = MagicMock(return_value=_UpperCAmelCase ) with patch("""transformers.onnx.features.is_tf_available""" , _UpperCAmelCase ), patch( """transformers.onnx.features.is_torch_available""" , _UpperCAmelCase ): with self.assertRaises(_UpperCAmelCase ): a__ : Tuple = FeaturesManager.determine_framework(self.test_model )
170
"""simple docstring""" from collections.abc import Callable def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' _A = a _A = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: _A = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: _A = mid else: _A = mid _A = start + (end - start) / 2.0 return mid def _snake_case ( _snake_case : float ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
315
0
'''simple docstring''' import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) def __UpperCamelCase ( lowercase__ : Dict, lowercase__ : int ): '''simple docstring''' __lowercase =nn.functional.normalize(_snake_case ) __lowercase =nn.functional.normalize(_snake_case ) return torch.mm(_snake_case, normalized_text_embeds.t() ) class lowerCAmelCase ( __lowerCAmelCase ): lowerCAmelCase_ = CLIPConfig lowerCAmelCase_ = ['''CLIPEncoderLayer'''] def __init__( self : Optional[int] , __lowercase : CLIPConfig ): """simple docstring""" super().__init__(_UpperCAmelCase ) __lowercase =CLIPVisionModel(config.vision_config ) __lowercase =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase ) __lowercase =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase ) __lowercase =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase ) __lowercase =nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase ) __lowercase =nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase ) @torch.no_grad() def snake_case ( self : Optional[int] , __lowercase : Tuple , __lowercase : List[Any] ): """simple docstring""" __lowercase =self.vision_model(_UpperCAmelCase )[1] # pooled_output __lowercase =self.visual_projection(_UpperCAmelCase ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __lowercase =cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy() __lowercase =cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy() __lowercase =[] __lowercase =image_embeds.shape[0] for i in range(_UpperCAmelCase ): __lowercase ={'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images __lowercase =0.0 for concept_idx in range(len(special_cos_dist[0] ) ): __lowercase =special_cos_dist[i][concept_idx] __lowercase =self.special_care_embeds_weights[concept_idx].item() __lowercase =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} ) __lowercase =0.0_1 for concept_idx in range(len(cos_dist[0] ) ): __lowercase =cos_dist[i][concept_idx] __lowercase =self.concept_embeds_weights[concept_idx].item() __lowercase =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(_UpperCAmelCase ) result.append(_UpperCAmelCase ) __lowercase =[len(res['bad_concepts'] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def snake_case ( self : Optional[Any] , __lowercase : torch.FloatTensor , __lowercase : torch.FloatTensor ): """simple docstring""" __lowercase =self.vision_model(_UpperCAmelCase )[1] # pooled_output __lowercase =self.visual_projection(_UpperCAmelCase ) __lowercase =cosine_distance(_UpperCAmelCase , self.special_care_embeds ) __lowercase =cosine_distance(_UpperCAmelCase , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images __lowercase =0.0 __lowercase =special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) __lowercase =torch.any(special_scores > 0 , dim=1 ) __lowercase =special_care * 0.0_1 __lowercase =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) __lowercase =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) __lowercase =torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
141
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
315
0
import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) lowerCAmelCase_ = logging.getLogger() def snake_case( ) -> Optional[Any]: '''simple docstring''' lowercase : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''-f''' ) lowercase : Dict = parser.parse_args() return args.f class _A ( __lowerCAmelCase ): def __a ( self : List[Any] ) -> Optional[int]: """simple docstring""" lowercase : List[Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(_UpperCAmelCase ) def __a ( self : str , _A : Optional[int] ) -> Union[str, Any]: """simple docstring""" lowercase : List[str] = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 , '''run_glue_deebert.py''' ) with patch.object(_UpperCAmelCase , '''argv''' , _UpperCAmelCase ): lowercase : Optional[Any] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(_UpperCAmelCase , 0.666 ) @slow @require_torch_non_multi_gpu def __a ( self : Optional[Any] ) -> List[Any]: """simple docstring""" lowercase : List[str] = '''\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '''.split() self.run_and_check(_UpperCAmelCase ) lowercase : Tuple = '''\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '''.split() self.run_and_check(_UpperCAmelCase ) lowercase : List[str] = '''\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '''.split() self.run_and_check(_UpperCAmelCase )
308
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
315
0
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) UpperCAmelCase : Optional[int] = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def __lowerCamelCase ( lowerCamelCase__ : Any ): '''simple docstring''' lowerCamelCase = {} state_dict.pop("""pixel_mean""" , _snake_case ) state_dict.pop("""pixel_std""" , _snake_case ) lowerCamelCase = R""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*""" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowerCamelCase = key.replace(_snake_case , _snake_case ) if re.match(_snake_case , _snake_case ): lowerCamelCase = int(re.match(_snake_case , _snake_case ).group(2 ) ) if layer_nb == 0: lowerCamelCase = key.replace("""layers.0""" , """proj_in""" ) elif layer_nb == 1: lowerCamelCase = key.replace("""layers.1""" , """layers.0""" ) elif layer_nb == 2: lowerCamelCase = key.replace("""layers.2""" , """proj_out""" ) lowerCamelCase = value lowerCamelCase = model_state_dict[ """prompt_encoder.shared_embedding.positional_embedding""" ] return model_state_dict def __lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : str="ybelkada/segment-anything" ): '''simple docstring''' lowerCamelCase = hf_hub_download(_snake_case , f'checkpoints/{model_name}.pth' ) if "sam_vit_b" in model_name: lowerCamelCase = SamConfig() elif "sam_vit_l" in model_name: lowerCamelCase = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) lowerCamelCase = SamConfig( vision_config=_snake_case , ) elif "sam_vit_h" in model_name: lowerCamelCase = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) lowerCamelCase = SamConfig( vision_config=_snake_case , ) lowerCamelCase = torch.load(_snake_case , map_location="""cpu""" ) lowerCamelCase = replace_keys(_snake_case ) lowerCamelCase = SamImageProcessor() lowerCamelCase = SamProcessor(image_processor=_snake_case ) lowerCamelCase = SamModel(_snake_case ) hf_model.load_state_dict(_snake_case ) lowerCamelCase = hf_model.to("""cuda""" ) lowerCamelCase = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png""" lowerCamelCase = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert("""RGB""" ) lowerCamelCase = [[[400, 650]]] lowerCamelCase = [[1]] lowerCamelCase = processor(images=np.array(_snake_case ) , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): lowerCamelCase = hf_model(**_snake_case ) lowerCamelCase = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8 lowerCamelCase = processor( images=np.array(_snake_case ) , input_points=_snake_case , input_labels=_snake_case , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): lowerCamelCase = hf_model(**_snake_case ) lowerCamelCase = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4 lowerCamelCase = ((75, 275, 1725, 850),) lowerCamelCase = processor(images=np.array(_snake_case ) , input_boxes=_snake_case , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): lowerCamelCase = hf_model(**_snake_case ) lowerCamelCase = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4 # Test with 2 points and 1 image. lowerCamelCase = [[[400, 650], [800, 650]]] lowerCamelCase = [[1, 1]] lowerCamelCase = processor( images=np.array(_snake_case ) , input_points=_snake_case , input_labels=_snake_case , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): lowerCamelCase = hf_model(**_snake_case ) lowerCamelCase = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2 if __name__ == "__main__": UpperCAmelCase : List[Any] = argparse.ArgumentParser() UpperCAmelCase : int = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--model_hub_id", default="ybelkada/segment-anything", choices=choices, type=str, help="Path to hf config.json of model to convert", ) UpperCAmelCase : int = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
252
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : tuple[int, int] , _snake_case : int ) -> list[tuple[int, int]]: '''simple docstring''' _A , _A = position _A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A = [] for position in positions: _A , _A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_snake_case ) return permissible_positions def _snake_case ( _snake_case : list[list[int]] ) -> bool: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def _snake_case ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) -> bool: '''simple docstring''' if is_complete(_snake_case ): return True for position in get_valid_pos(_snake_case , len(_snake_case ) ): _A , _A = position if board[y][x] == 0: _A = curr + 1 if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ): return True _A = 0 return False def _snake_case ( _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [[0 for i in range(_snake_case )] for j in range(_snake_case )] for i in range(_snake_case ): for j in range(_snake_case ): _A = 1 if open_knight_tour_helper(_snake_case , (i, j) , 1 ): return board _A = 0 _A = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
315
0
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL __A = logging.get_logger(__name__) def lowerCAmelCase_ ( __a , __a , __a , __a ) -> Tuple[int, int]: """simple docstring""" def constraint_to_multiple_of(__a , __a , __a=0 , __a=None ): lowerCamelCase__: str =round(val / multiple ) * multiple if max_val is not None and x > max_val: lowerCamelCase__: Dict =math.floor(val / multiple ) * multiple if x < min_val: lowerCamelCase__: Optional[Any] =math.ceil(val / multiple ) * multiple return x lowerCamelCase__: str =(output_size, output_size) if isinstance(_snake_case , _snake_case ) else output_size lowerCamelCase__ , lowerCamelCase__: Optional[Any] =get_image_size(_snake_case ) lowerCamelCase__ , lowerCamelCase__: Tuple =output_size # determine new height and width lowerCamelCase__: List[Any] =output_height / input_height lowerCamelCase__: Optional[int] =output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width lowerCamelCase__: List[Any] =scale_width else: # fit height lowerCamelCase__: List[Any] =scale_height lowerCamelCase__: List[str] =constraint_to_multiple_of(scale_height * input_height , multiple=_snake_case ) lowerCamelCase__: Dict =constraint_to_multiple_of(scale_width * input_width , multiple=_snake_case ) return (new_height, new_width) class _SCREAMING_SNAKE_CASE ( __lowerCAmelCase ): '''simple docstring''' lowercase_ = ['''pixel_values'''] def __init__(self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 255 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->int: '''simple docstring''' super().__init__(**_UpperCAmelCase) lowerCamelCase__: int =size if size is not None else {"height": 384, "width": 384} lowerCamelCase__: Optional[Any] =get_size_dict(_UpperCAmelCase) lowerCamelCase__: List[Any] =do_resize lowerCamelCase__: Union[str, Any] =size lowerCamelCase__: Any =keep_aspect_ratio lowerCamelCase__: List[Any] =ensure_multiple_of lowerCamelCase__: Optional[Any] =resample lowerCamelCase__: Optional[int] =do_rescale lowerCamelCase__: Optional[int] =rescale_factor lowerCamelCase__: Tuple =do_normalize lowerCamelCase__: Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCamelCase__: int =image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE_ (self : List[str] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[Any] , ) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Dict =get_size_dict(_UpperCAmelCase) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}""") lowerCamelCase__: int =get_resize_output_image_size( _UpperCAmelCase , output_size=(size["height"], size["width"]) , keep_aspect_ratio=_UpperCAmelCase , multiple=_UpperCAmelCase , ) return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ) ->str: '''simple docstring''' return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ) ->Union[str, Any]: '''simple docstring''' return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : int = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : float = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Any , ) ->Dict: '''simple docstring''' lowerCamelCase__: Optional[int] =do_resize if do_resize is not None else self.do_resize lowerCamelCase__: int =size if size is not None else self.size lowerCamelCase__: Tuple =get_size_dict(_UpperCAmelCase) lowerCamelCase__: List[Any] =keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio lowerCamelCase__: Optional[Any] =ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of lowerCamelCase__: str =resample if resample is not None else self.resample lowerCamelCase__: Optional[int] =do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase__: List[str] =rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase__: List[Any] =do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase__: Union[str, Any] =image_mean if image_mean is not None else self.image_mean lowerCamelCase__: List[Any] =image_std if image_std is not None else self.image_std lowerCamelCase__: int =make_list_of_images(_UpperCAmelCase) if not valid_images(_UpperCAmelCase): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray.") if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # All transformations expect numpy arrays. lowerCamelCase__: Optional[int] =[to_numpy_array(_UpperCAmelCase) for image in images] if do_resize: lowerCamelCase__: Tuple =[self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase) for image in images] if do_rescale: lowerCamelCase__: List[Any] =[self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase) for image in images] if do_normalize: lowerCamelCase__: Dict =[self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase) for image in images] lowerCamelCase__: Optional[int] =[to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase) for image in images] lowerCamelCase__: Optional[int] ={"pixel_values": images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase) def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Tuple] = None) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Optional[Any] =outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_UpperCAmelCase) != len(_UpperCAmelCase): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits") if is_torch_tensor(_UpperCAmelCase): lowerCamelCase__: str =target_sizes.numpy() lowerCamelCase__: int =[] for idx in range(len(_UpperCAmelCase)): lowerCamelCase__: List[Any] =torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="bilinear" , align_corners=_UpperCAmelCase) lowerCamelCase__: Tuple =resized_logits[0].argmax(dim=0) semantic_segmentation.append(_UpperCAmelCase) else: lowerCamelCase__: Union[str, Any] =logits.argmax(dim=1) lowerCamelCase__: str =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation
10
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) _A = eval_examples _A = post_process_function def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ): _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(_UpperCAmelCase ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) else: _A = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ): _A = self.get_test_dataloader(_UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
315
0
import glob import os import random from string import ascii_lowercase, digits import cva __UpperCamelCase : str = "" __UpperCamelCase : List[str] = "" __UpperCamelCase : Union[str, Any] = "" __UpperCamelCase : Optional[Any] = 1 # (0 is vertical, 1 is horizontal) def _a ( ): """simple docstring""" UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = get_dataset(_snake_case , _snake_case ) print('''Processing...''' ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : int = update_image_and_anno(_snake_case , _snake_case , _snake_case ) for index, image in enumerate(_snake_case ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' UpperCamelCase__ : Tuple = random_chars(32 ) UpperCamelCase__ : List[str] = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] UpperCamelCase__ : Optional[int] = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}" cva.imwrite(F"/{file_root}.jpg" , _snake_case , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"Success {index+1}/{len(_snake_case )} with {file_name}" ) UpperCamelCase__ : Union[str, Any] = [] for anno in new_annos[index]: UpperCamelCase__ : Dict = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}" annos_list.append(_snake_case ) with open(F"/{file_root}.txt" , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ): """simple docstring""" UpperCamelCase__ : Optional[int] = [] UpperCamelCase__ : int = [] for label_file in glob.glob(os.path.join(_snake_case , '''*.txt''' ) ): UpperCamelCase__ : List[Any] = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(_snake_case ) as in_file: UpperCamelCase__ : str = in_file.readlines() UpperCamelCase__ : Optional[int] = os.path.join(_snake_case , F"{label_name}.jpg" ) UpperCamelCase__ : int = [] for obj_list in obj_lists: UpperCamelCase__ : Union[str, Any] = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(_snake_case ) labels.append(_snake_case ) return img_paths, labels def _a ( SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : int = 1 ): """simple docstring""" UpperCamelCase__ : int = [] UpperCamelCase__ : str = [] UpperCamelCase__ : Dict = [] for idx in range(len(_snake_case ) ): UpperCamelCase__ : Any = [] UpperCamelCase__ : Any = img_list[idx] path_list.append(_snake_case ) UpperCamelCase__ : Any = anno_list[idx] UpperCamelCase__ : Optional[Any] = cva.imread(_snake_case ) if flip_type == 1: UpperCamelCase__ : Optional[Any] = cva.flip(_snake_case , _snake_case ) for bbox in img_annos: UpperCamelCase__ : List[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: UpperCamelCase__ : Any = cva.flip(_snake_case , _snake_case ) for bbox in img_annos: UpperCamelCase__ : str = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(_snake_case ) new_imgs_list.append(_snake_case ) return new_imgs_list, new_annos_lists, path_list def _a ( SCREAMING_SNAKE_CASE : int = 32 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" UpperCamelCase__ : str = ascii_lowercase + digits return "".join(random.choice(_snake_case ) for _ in range(_snake_case ) ) if __name__ == "__main__": main() print("DONE ✅")
146
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> bool: '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
315
0
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def lowercase__ ( self ): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=_UpperCAmelCase , ) assert hasattr(self , "env" ) def lowercase__ ( self , snake_case__ ): """simple docstring""" lowerCAmelCase : Any = { "enabled": True, "processes_per_host": 8, } lowerCAmelCase : Any = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } lowerCAmelCase : Optional[int] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} lowerCAmelCase : str = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=_UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=_UpperCAmelCase , hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 500, } , metric_definitions=self.env.metric_definitions , distribution=_UpperCAmelCase , py_version="py36" , ) def lowercase__ ( self , snake_case__ ): """simple docstring""" TrainingJobAnalytics(_UpperCAmelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowercase__ ( self , snake_case__ ): """simple docstring""" lowerCAmelCase : Optional[Any] = self.create_estimator(_UpperCAmelCase ) # run training estimator.fit() # result dataframe lowerCAmelCase : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase : int = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) lowerCAmelCase : List[str] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping lowerCAmelCase : Union[str, Any] = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , _UpperCAmelCase )
108
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
315
0
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def __lowerCamelCase ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(_snake_case ): requests.request("GET" , "https://huggingface.co" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("GET" , "https://huggingface.co" , timeout=1.0 ) @pytest.mark.integration def __lowerCamelCase ( ): """simple docstring""" with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("GET" , "https://huggingface.co" ) def __lowerCamelCase ( ): """simple docstring""" with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(_snake_case ): http_head("https://huggingface.co" )
130
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''xlnet''' UpperCAmelCase : List[Any] = ['''mems'''] UpperCAmelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ): _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) _A = kwargs['use_cache'] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
315
0
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar __lowerCamelCase : List[str] = TypeVar("""T""") class A__ ( Generic[T] ): def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : List[str] = data UpperCamelCase : Tuple = None def __str__( self ): '''simple docstring''' return F"""{self.data}""" class A__ ( Generic[T] ): def __init__( self ): '''simple docstring''' UpperCamelCase : int = None def __iter__( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.top while node: yield node.data UpperCamelCase : Tuple = node.next def __str__( self ): '''simple docstring''' return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self ): '''simple docstring''' return len(tuple(iter(self ) ) ) def __UpperCamelCase( self ): '''simple docstring''' return self.top is None def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[str] = Node(_UpperCAmelCase ) if not self.is_empty(): UpperCamelCase : Union[str, Any] = self.top UpperCamelCase : Optional[Any] = node def __UpperCamelCase( self ): '''simple docstring''' if self.is_empty(): raise IndexError("pop from empty stack" ) assert isinstance(self.top , _UpperCAmelCase ) UpperCamelCase : List[Any] = self.top UpperCamelCase : int = self.top.next return pop_node.data def __UpperCamelCase( self ): '''simple docstring''' if self.is_empty(): raise IndexError("peek from empty stack" ) assert self.top is not None return self.top.data def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = None if __name__ == "__main__": from doctest import testmod testmod()
52
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' ) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_snake_case , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' ) _A = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_snake_case ) , _snake_case , indent=4 ) git_log(args.dump_path ) _A , _A , _A = MODEL_CLASSES[args.student_type] _A , _A , _A = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A = tokenizer.all_special_tokens.index(_snake_case ) _A = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) _A = special_tok_ids _A = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: _A = pickle.load(_snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: _A = pickle.load(_snake_case ) _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A = 0.0 # do not predict special tokens _A = torch.from_numpy(_snake_case ) else: _A = None _A = LmSeqsDataset(params=_snake_case , data=_snake_case ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) _A = student_config_class.from_pretrained(args.student_config ) _A = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case ) else: _A = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' ) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
315
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = {'vocab_file': 'sentencepiece.model'} UpperCamelCase_ = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, } UpperCamelCase_ = { 'google/rembert': 256, } class snake_case ( __lowerCAmelCase ): a_ : Optional[Any] = VOCAB_FILES_NAMES a_ : int = PRETRAINED_VOCAB_FILES_MAP a_ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[UNK]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="[PAD]" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) ->Union[str, Any]: super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , ) a_ = do_lower_case a_ = remove_space a_ = keep_accents a_ = vocab_file a_ = spm.SentencePieceProcessor() self.sp_model.Load(_UpperCAmelCase) @property def UpperCAmelCase__ ( self) ->Union[str, Any]: return len(self.sp_model) def UpperCAmelCase__ ( self) ->Optional[int]: a_ = {self.convert_ids_to_tokens(_UpperCAmelCase): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) ->Any: a_ = self.__dict__.copy() a_ = None return state def __setstate__( self , __UpperCAmelCase) ->int: a_ = d a_ = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=False) ->Optional[int]: a_ = self.sp_model.EncodeAsPieces(_UpperCAmelCase) return pieces def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Tuple: return self.sp_model.PieceToId(_UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->List[Any]: return self.sp_model.IdToPiece(_UpperCAmelCase) def UpperCAmelCase__ ( self , __UpperCAmelCase) ->Optional[Any]: a_ = self.sp_model.decode_pieces(_UpperCAmelCase) return out_string def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->List[Any]: a_ = [self.sep_token_id] a_ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False) ->str: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model.") return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_UpperCAmelCase)) + [1] + ([0] * len(_UpperCAmelCase)) + [1] return [1] + ([0] * len(_UpperCAmelCase)) + [1] def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->Any: a_ = [self.sep_token_id] a_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->str: if not os.path.isdir(_UpperCAmelCase): logger.error("Vocabulary path ({}) should be a directory".format(_UpperCAmelCase)) return a_ = os.path.join( _UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase): copyfile(self.vocab_file , _UpperCAmelCase) return (out_vocab_file,)
243
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
315
0
import os import time import numpy as np import onnxruntime as ort _snake_case = "1" _snake_case = "0" _snake_case = "1" _snake_case = ort.SessionOptions() _snake_case = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print("Create inference session...") _snake_case = ["TensorrtExecutionProvider", "CUDAExecutionProvider"] _snake_case = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider) _snake_case = ort.RunOptions() _snake_case = 128 _snake_case = 1 _snake_case = np.ones((batch, sequence), dtype=np.intaa) _snake_case = np.ones((batch, sequence), dtype=np.intaa) _snake_case = np.ones((batch, sequence), dtype=np.intaa) print("Warm up phase...") sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("Start inference...") _snake_case = time.time() _snake_case = 2000 _snake_case = {} for iter in range(max_iters): _snake_case = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
26
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
315
0
from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def lowerCAmelCase_ ( ) -> tuple[list[int], int]: """simple docstring""" a__ : str = [randint(-1000 , 1000) for i in range(10)] a__ : Union[str, Any] = randint(-5000 , 5000) return (arr, r) _lowercase : Any =make_dataset() def lowerCAmelCase_ ( _lowercase : list[int] , _lowercase : int) -> tuple[int, ...]: """simple docstring""" for triplet in permutations(_snake_case , 3): if sum(_snake_case) == target: return tuple(sorted(_snake_case)) return (0, 0, 0) def lowerCAmelCase_ ( _lowercase : list[int] , _lowercase : int) -> tuple[int, int, int]: """simple docstring""" arr.sort() a__ : str = len(_snake_case) for i in range(n - 1): a__ , a__ : Union[str, Any] = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def lowerCAmelCase_ ( ) -> tuple[float, float]: """simple docstring""" a__ : List[str] = """\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n""" a__ : int = """\ntriplet_sum1(*dataset)\n""" a__ : List[str] = """\ntriplet_sum2(*dataset)\n""" a__ : Tuple = repeat(setup=_snake_case , stmt=_snake_case , repeat=5 , number=1_0000) a__ : Tuple = repeat(setup=_snake_case , stmt=_snake_case , repeat=5 , number=1_0000) return (min(_snake_case), min(_snake_case)) if __name__ == "__main__": from doctest import testmod testmod() _lowercase : Dict =solution_times() print(f'The time for naive implementation is {times[0]}.') print(f'The time for optimized implementation is {times[1]}.')
170
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
315
0
'''simple docstring''' import math class lowerCAmelCase : def snake_case ( self : Union[str, Any] , __lowercase : list[list[float]] , __lowercase : list[int] ): """simple docstring""" __lowercase =0.0 __lowercase =0.0 for i in range(len(_UpperCAmelCase ) ): da += math.pow((sample[i] - weights[0][i]) , 2 ) da += math.pow((sample[i] - weights[1][i]) , 2 ) return 0 if da > da else 1 return 0 def snake_case ( self : str , __lowercase : list[list[int | float]] , __lowercase : list[int] , __lowercase : int , __lowercase : float ): """simple docstring""" for i in range(len(_UpperCAmelCase ) ): weights[j][i] += alpha * (sample[i] - weights[j][i]) return weights def __UpperCamelCase ( ): '''simple docstring''' __lowercase =[[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]] # weight initialization ( n, C ) __lowercase =[[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]] # training __lowercase =SelfOrganizingMap() __lowercase =3 __lowercase =0.5 for _ in range(_snake_case ): for j in range(len(_snake_case ) ): # training sample __lowercase =training_samples[j] # Compute the winning vector __lowercase =self_organizing_map.get_winner(_snake_case, _snake_case ) # Update the winning vector __lowercase =self_organizing_map.update(_snake_case, _snake_case, _snake_case, _snake_case ) # classify test sample __lowercase =[0, 0, 0, 1] __lowercase =self_organizing_map.get_winner(_snake_case, _snake_case ) # results print(F'''Clusters that the test sample belongs to : {winner}''' ) print(F'''Weights that have been trained : {weights}''' ) # running the main() function if __name__ == "__main__": main()
141
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
315
0
import itertools import os import re lowerCAmelCase_ = re.compile(R'([A-Z]+)([A-Z][a-z])') lowerCAmelCase_ = re.compile(R'([a-z\d])([A-Z])') lowerCAmelCase_ = re.compile(R'(?<!_)_(?!_)') lowerCAmelCase_ = re.compile(R'(_{2,})') lowerCAmelCase_ = R'^\w+(\.\w+)*$' lowerCAmelCase_ = R'<>:/\|?*' def snake_case( __magic_name__ ) -> Dict: '''simple docstring''' lowercase : Optional[Any] = _uppercase_uppercase_re.sub(r'''\1_\2''' , _snake_case ) lowercase : int = _lowercase_uppercase_re.sub(r'''\1_\2''' , _snake_case ) return name.lower() def snake_case( __magic_name__ ) -> Dict: '''simple docstring''' lowercase : str = _single_underscore_re.split(_snake_case ) lowercase : str = [_multiple_underscores_re.split(_snake_case ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(_snake_case ) if n != '''''' ) def snake_case( __magic_name__ ) -> int: '''simple docstring''' if os.path.basename(_snake_case ) != name: raise ValueError(F"""Should be a dataset name, not a path: {name}""" ) return camelcase_to_snakecase(_snake_case ) def snake_case( __magic_name__ , __magic_name__ ) -> Dict: '''simple docstring''' if os.path.basename(_snake_case ) != name: raise ValueError(F"""Should be a dataset name, not a path: {name}""" ) if not re.match(_split_re , _snake_case ): raise ValueError(F"""Split name should match \'{_split_re}\'\' but got \'{split}\'.""" ) return F"""{filename_prefix_for_name(_snake_case )}-{split}""" def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ) -> Optional[Any]: '''simple docstring''' lowercase : Any = filename_prefix_for_split(_snake_case , _snake_case ) if filetype_suffix: prefix += F""".{filetype_suffix}""" lowercase : str = os.path.join(_snake_case , _snake_case ) return F"""{filepath}*""" def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , __magic_name__=None ) -> List[Any]: '''simple docstring''' lowercase : Optional[Any] = filename_prefix_for_split(_snake_case , _snake_case ) lowercase : List[str] = os.path.join(_snake_case , _snake_case ) if shard_lengths: lowercase : str = len(_snake_case ) lowercase : List[str] = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(_snake_case )] if filetype_suffix: lowercase : Tuple = [filename + F""".{filetype_suffix}""" for filename in filenames] return filenames else: lowercase : Any = prefix if filetype_suffix: filename += F""".{filetype_suffix}""" return [filename]
308
"""simple docstring""" def _snake_case ( _snake_case : int = 10_00 ) -> int: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
315
0
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params UpperCAmelCase : List[str] = getLogger(__name__) UpperCAmelCase : List[str] = "cuda" if torch.cuda.is_available() else "cpu" def __lowerCamelCase ( lowerCamelCase__ : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : int = 8 , lowerCamelCase__ : str = DEFAULT_DEVICE , lowerCamelCase__ : int=False , lowerCamelCase__ : List[str]="summarization" , lowerCamelCase__ : List[str]=None , **lowerCamelCase__ : str , ): '''simple docstring''' lowerCamelCase = Path(_snake_case ).open("""w""" , encoding="""utf-8""" ) lowerCamelCase = str(_snake_case ) lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(_snake_case ).to(_snake_case ) if fpaa: lowerCamelCase = model.half() lowerCamelCase = AutoTokenizer.from_pretrained(_snake_case ) logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type. lowerCamelCase = time.time() # update config with task specific params use_task_specific_params(_snake_case , _snake_case ) if prefix is None: lowerCamelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """""" for examples_chunk in tqdm(list(chunks(_snake_case , _snake_case ) ) ): lowerCamelCase = [prefix + text for text in examples_chunk] lowerCamelCase = tokenizer(_snake_case , return_tensors="""pt""" , truncation=_snake_case , padding="""longest""" ).to(_snake_case ) lowerCamelCase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_snake_case , ) lowerCamelCase = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case ) for hypothesis in dec: fout.write(hypothesis + """\n""" ) fout.flush() fout.close() lowerCamelCase = int(time.time() - start_time ) # seconds lowerCamelCase = len(_snake_case ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def __lowerCamelCase ( ): '''simple docstring''' return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" ) def __lowerCamelCase ( lowerCamelCase__ : List[Any]=True ): '''simple docstring''' lowerCamelCase = argparse.ArgumentParser() parser.add_argument("""model_name""" , type=_snake_case , help="""like facebook/bart-large-cnn,t5-base, etc.""" ) parser.add_argument("""input_path""" , type=_snake_case , help="""like cnn_dm/test.source""" ) parser.add_argument("""save_path""" , type=_snake_case , help="""where to save summaries""" ) parser.add_argument("""--reference_path""" , type=_snake_case , required=_snake_case , help="""like cnn_dm/test.target""" ) parser.add_argument("""--score_path""" , type=_snake_case , required=_snake_case , default="""metrics.json""" , help="""where to save metrics""" ) parser.add_argument("""--device""" , type=_snake_case , required=_snake_case , default=_snake_case , help="""cuda, cuda:1, cpu etc.""" ) parser.add_argument( """--prefix""" , type=_snake_case , required=_snake_case , default=_snake_case , help="""will be added to the begininng of src examples""" ) parser.add_argument("""--task""" , type=_snake_case , default="""summarization""" , help="""used for task_specific_params + metrics""" ) parser.add_argument("""--bs""" , type=_snake_case , default=8 , required=_snake_case , help="""batch size""" ) parser.add_argument( """--n_obs""" , type=_snake_case , default=-1 , required=_snake_case , help="""How many observations. Defaults to all.""" ) parser.add_argument("""--fp16""" , action="""store_true""" ) parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" ) parser.add_argument( """--info""" , nargs="""?""" , type=_snake_case , const=datetime_now() , help=( """use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.""" """ lang=en-ru. If no value is passed, the current datetime string will be used.""" ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate lowerCamelCase , lowerCamelCase = parser.parse_known_args() lowerCamelCase = parse_numeric_n_bool_cl_kwargs(_snake_case ) if parsed_args and verbose: print(f'parsed the following generate kwargs: {parsed_args}' ) lowerCamelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: lowerCamelCase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=_snake_case ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(f'score_path {args.score_path} will be overwritten unless you type ctrl-c.' ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError("""Can\'t mix --fp16 and --device cpu""" ) lowerCamelCase = generate_summaries_or_translations( _snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_snake_case , ) if args.reference_path is None: return {} # Compute scores lowerCamelCase = calculate_bleu if """translation""" in args.task else calculate_rouge lowerCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()] lowerCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_snake_case )] lowerCamelCase = score_fn(_snake_case , _snake_case ) scores.update(_snake_case ) if args.dump_args: scores.update(_snake_case ) if args.info: lowerCamelCase = args.info if verbose: print(_snake_case ) if args.score_path is not None: json.dump(_snake_case , open(args.score_path , """w""" ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
252
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
315
0
import numpy class _SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__(self : Dict , UpperCAmelCase_ : numpy.ndarray , UpperCAmelCase_ : numpy.ndarray) ->Tuple: '''simple docstring''' lowerCamelCase__: Optional[int] =input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. lowerCamelCase__: List[Any] =numpy.random.rand( self.input_array.shape[1] , 4) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. lowerCamelCase__: Union[str, Any] =numpy.random.rand( 4 , 3) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. lowerCamelCase__: List[str] =numpy.random.rand(3 , 1) # Real output values provided. lowerCamelCase__: Dict =output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. lowerCamelCase__: Union[str, Any] =numpy.zeros(output_array.shape) def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Optional[Any]: '''simple docstring''' lowerCamelCase__: Any =sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights)) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. lowerCamelCase__: Optional[Any] =sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , )) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. lowerCamelCase__: Tuple =sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , )) return self.layer_between_second_hidden_layer_and_output def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str: '''simple docstring''' lowerCamelCase__: List[str] =numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output) , ) lowerCamelCase__: Dict =numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer) , ) lowerCamelCase__: Dict =numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : numpy.ndarray , UpperCAmelCase_ : int , UpperCAmelCase_ : bool) ->List[Any]: '''simple docstring''' for iteration in range(1 , iterations + 1): lowerCamelCase__: Union[str, Any] =self.feedforward() self.back_propagation() if give_loss: lowerCamelCase__: str =numpy.mean(numpy.square(output - self.feedforward())) print(F"""Iteration {iteration} Loss: {loss}""") def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : numpy.ndarray) ->Union[str, Any]: '''simple docstring''' lowerCamelCase__: Optional[Any] =input_arr lowerCamelCase__: Any =sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights)) lowerCamelCase__: str =sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , )) lowerCamelCase__: Dict =sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , )) return int(self.layer_between_second_hidden_layer_and_output > 0.6) def lowerCAmelCase_ ( __a ) -> numpy.ndarray: """simple docstring""" return 1 / (1 + numpy.exp(-value )) def lowerCAmelCase_ ( __a ) -> numpy.ndarray: """simple docstring""" return (value) * (1 - (value)) def lowerCAmelCase_ ( ) -> int: """simple docstring""" lowerCamelCase__: Dict =numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. lowerCamelCase__: Union[str, Any] =numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. lowerCamelCase__: int =TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
10
"""simple docstring""" import numpy class lowercase_ : '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ): _A = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _A = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _A = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _A = numpy.random.rand(3 , 1 ) # Real output values provided. _A = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _A = numpy.zeros(output_array.shape ) def lowerCAmelCase_ ( self : List[str] ): _A = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowerCAmelCase_ ( self : Optional[int] ): _A = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _A = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _A = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ): for iteration in range(1 , iterations + 1 ): _A = self.feedforward() self.back_propagation() if give_loss: _A = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ): _A = input_arr _A = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return 1 / (1 + numpy.exp(-value )) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return (value) * (1 - (value)) def _snake_case ( ) -> int: '''simple docstring''' _A = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _A = TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
315
0
import warnings from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch from ...models import UNetaDModel from ...schedulers import RePaintScheduler from ...utils import PIL_INTERPOLATION, logging, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __UpperCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name def _a ( SCREAMING_SNAKE_CASE : Union[List, PIL.Image.Image, torch.Tensor] ): """simple docstring""" warnings.warn( '''The preprocess method is deprecated and will be removed in a future version. Please''' ''' use VaeImageProcessor.preprocess instead''' , _snake_case , ) if isinstance(_snake_case , torch.Tensor ): return image elif isinstance(_snake_case , PIL.Image.Image ): UpperCamelCase__ : Optional[int] = [image] if isinstance(image[0] , PIL.Image.Image ): UpperCamelCase__ , UpperCamelCase__ : List[str] = image[0].size UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 UpperCamelCase__ : str = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image] UpperCamelCase__ : List[Any] = np.concatenate(_snake_case , axis=0 ) UpperCamelCase__ : str = np.array(_snake_case ).astype(np.floataa ) / 255.0 UpperCamelCase__ : Dict = image.transpose(0 , 3 , 1 , 2 ) UpperCamelCase__ : int = 2.0 * image - 1.0 UpperCamelCase__ : Union[str, Any] = torch.from_numpy(_snake_case ) elif isinstance(image[0] , torch.Tensor ): UpperCamelCase__ : Union[str, Any] = torch.cat(_snake_case , dim=0 ) return image def _a ( SCREAMING_SNAKE_CASE : Union[List, PIL.Image.Image, torch.Tensor] ): """simple docstring""" if isinstance(_snake_case , torch.Tensor ): return mask elif isinstance(_snake_case , PIL.Image.Image ): UpperCamelCase__ : int = [mask] if isinstance(mask[0] , PIL.Image.Image ): UpperCamelCase__ , UpperCamelCase__ : str = mask[0].size UpperCamelCase__ , UpperCamelCase__ : List[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCamelCase__ : List[str] = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask] UpperCamelCase__ : Any = np.concatenate(_snake_case , axis=0 ) UpperCamelCase__ : Tuple = mask.astype(np.floataa ) / 255.0 UpperCamelCase__ : List[Any] = 0 UpperCamelCase__ : str = 1 UpperCamelCase__ : Optional[Any] = torch.from_numpy(_snake_case ) elif isinstance(mask[0] , torch.Tensor ): UpperCamelCase__ : List[Any] = torch.cat(_snake_case , dim=0 ) return mask class __magic_name__ ( __lowerCAmelCase): A: UNetaDModel A: RePaintScheduler def __init__( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Dict ) -> Any: '''simple docstring''' super().__init__() self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) @torch.no_grad() def __call__( self : List[Any] , lowerCamelCase__ : Union[torch.Tensor, PIL.Image.Image] , lowerCamelCase__ : Union[torch.Tensor, PIL.Image.Image] , lowerCamelCase__ : int = 250 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 10 , lowerCamelCase__ : int = 10 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) -> List[str]: '''simple docstring''' UpperCamelCase__ : Tuple = image UpperCamelCase__ : Dict = _preprocess_image(_UpperCAmelCase ) UpperCamelCase__ : Dict = original_image.to(device=self.device , dtype=self.unet.dtype ) UpperCamelCase__ : Tuple = _preprocess_mask(_UpperCAmelCase ) UpperCamelCase__ : Tuple = mask_image.to(device=self.device , dtype=self.unet.dtype ) UpperCamelCase__ : Dict = original_image.shape[0] # sample gaussian noise to begin the loop if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size: raise ValueError( F"You have passed a list of generators of length {len(_UpperCAmelCase )}, but requested an effective batch" F" size of {batch_size}. Make sure the batch size matches the length of the generators." ) UpperCamelCase__ : Dict = original_image.shape UpperCamelCase__ : List[Any] = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , self.device ) UpperCamelCase__ : Dict = eta UpperCamelCase__ : List[str] = self.scheduler.timesteps[0] + 1 UpperCamelCase__ : Dict = generator[0] if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else generator for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): if t < t_last: # predict the noise residual UpperCamelCase__ : Union[str, Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample # compute previous image: x_t -> x_t-1 UpperCamelCase__ : List[Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample else: # compute the reverse: x_t-1 -> x_t UpperCamelCase__ : str = self.scheduler.undo_step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) UpperCamelCase__ : Optional[Any] = t UpperCamelCase__ : Tuple = (image / 2 + 0.5).clamp(0 , 1 ) UpperCamelCase__ : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase__ : Any = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCAmelCase )
146
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
315
0
"""simple docstring""" from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def a__ ( SCREAMING_SNAKE_CASE : str ): '''simple docstring''' lowerCAmelCase , lowerCAmelCase : str = analyze_text(_snake_case ) lowerCAmelCase : Optional[Any] = list(" " + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase : Any = sum(single_char_strings.values() ) # one length string lowerCAmelCase : List[str] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase : Tuple = single_char_strings[ch] lowerCAmelCase : Tuple = my_str / all_sum my_fir_sum += prob * math.loga(_snake_case ) # entropy formula. # print entropy print(f"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase : Optional[int] = sum(two_char_strings.values() ) lowerCAmelCase : int = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase : Dict = cha + cha if sequence in two_char_strings: lowerCAmelCase : List[Any] = two_char_strings[sequence] lowerCAmelCase : List[str] = int(_snake_case ) / all_sum my_sec_sum += prob * math.loga(_snake_case ) # print second entropy print(f"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def a__ ( SCREAMING_SNAKE_CASE : str ): '''simple docstring''' lowerCAmelCase : Optional[Any] = Counter() # type: ignore lowerCAmelCase : List[Any] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(_snake_case ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def a__ ( ): '''simple docstring''' import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
108
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
315
0
import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ): """simple docstring""" if isinstance(_snake_case , _snake_case ): lowercase__ : str = np.full((len(_snake_case ), sequence_length, 2) , _snake_case ) else: lowercase__ : Any = np.full((len(_snake_case ), sequence_length) , _snake_case ) for i, tensor in enumerate(_snake_case ): if padding_side == "right": if isinstance(_snake_case , _snake_case ): lowercase__ : Any = tensor[:sequence_length] else: lowercase__ : Tuple = tensor[:sequence_length] else: if isinstance(_snake_case , _snake_case ): lowercase__ : int = tensor[:sequence_length] else: lowercase__ : Any = tensor[:sequence_length] return out_tensor.tolist() def __lowerCamelCase ( lowerCamelCase__ ): """simple docstring""" lowercase__ : Any = ord(_snake_case ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True lowercase__ : List[str] = unicodedata.category(_snake_case ) if cat.startswith("P" ): return True return False @dataclass class snake_case__(__lowerCAmelCase ): """simple docstring""" lowercase_ = 42 lowercase_ = True lowercase_ = None lowercase_ = None lowercase_ = -1_0_0 lowercase_ = "pt" def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] ): import torch lowercase__ : str = "label" if "label" in features[0].keys() else "labels" lowercase__ : Optional[int] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None lowercase__ : Union[str, Any] = self.tokenizer.pad( _UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , ) if labels is None: return batch lowercase__ : Optional[int] = torch.tensor(batch["entity_ids"] ).shape[1] lowercase__ : str = self.tokenizer.padding_side if padding_side == "right": lowercase__ : Tuple = [ list(_UpperCAmelCase ) + [self.label_pad_token_id] * (sequence_length - len(_UpperCAmelCase )) for label in labels ] else: lowercase__ : str = [ [self.label_pad_token_id] * (sequence_length - len(_UpperCAmelCase )) + list(_UpperCAmelCase ) for label in labels ] lowercase__ : List[str] = [feature["ner_tags"] for feature in features] lowercase__ : Any = padding_tensor(_UpperCAmelCase , -1 , _UpperCAmelCase , _UpperCAmelCase ) lowercase__ : str = [feature["original_entity_spans"] for feature in features] lowercase__ : Any = padding_tensor(_UpperCAmelCase , (-1, -1) , _UpperCAmelCase , _UpperCAmelCase ) lowercase__ : Dict = {k: torch.tensor(_UpperCAmelCase , dtype=torch.intaa ) for k, v in batch.items()} return batch
130
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(sorted(_snake_case ) ) def _snake_case ( _snake_case : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(_snake_case )] a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') a = sorted({word.strip().lower() for word in data.splitlines()}) a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
315
0
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __lowerCamelCase : Optional[int] = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __lowerCamelCase : Any = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __lowerCamelCase : Optional[int] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[str, float]: UpperCamelCase : str = len([g for position, g in enumerate(_snake_case ) if g == main_target[position]] ) return (item, float(_snake_case )) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[str, str]: UpperCamelCase : str = random.randint(0 , len(_snake_case ) - 1 ) UpperCamelCase : Union[str, Any] = parent_a[:random_slice] + parent_a[random_slice:] UpperCamelCase : Tuple = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : Optional[Any] = list(_snake_case ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: UpperCamelCase : int = random.choice(_snake_case ) return "".join(_snake_case ) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> list[str]: UpperCamelCase : Any = [] # Generate more children proportionally to the fitness score. UpperCamelCase : Optional[Any] = int(parent_a[1] * 100 ) + 1 UpperCamelCase : List[str] = 10 if child_n >= 10 else child_n for _ in range(_snake_case ): UpperCamelCase : Union[str, Any] = population_score[random.randint(0 , _snake_case )][0] UpperCamelCase , UpperCamelCase : int = crossover(parent_a[0] , _snake_case ) # Append new string to the population list. pop.append(mutate(_snake_case , _snake_case ) ) pop.append(mutate(_snake_case , _snake_case ) ) return pop def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = True ) -> tuple[int, int, str]: if N_POPULATION < N_SELECTED: UpperCamelCase : Dict = F"""{N_POPULATION} must be bigger than {N_SELECTED}""" raise ValueError(_snake_case ) # Verify that the target contains no genes besides the ones inside genes variable. UpperCamelCase : Optional[Any] = sorted({c for c in target if c not in genes} ) if not_in_genes_list: UpperCamelCase : List[Any] = F"""{not_in_genes_list} is not in genes list, evolution cannot converge""" raise ValueError(_snake_case ) # Generate random starting population. UpperCamelCase : List[str] = [] for _ in range(_snake_case ): population.append("".join([random.choice(_snake_case ) for i in range(len(_snake_case ) )] ) ) # Just some logs to know what the algorithms is doing. UpperCamelCase , UpperCamelCase : Optional[Any] = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_snake_case ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. UpperCamelCase : Optional[Any] = [evaluate(_snake_case , _snake_case ) for item in population] # Check if there is a matching evolution. UpperCamelCase : Dict = sorted(_snake_case , key=lambda _lowerCAmelCase : x[1] , reverse=_snake_case ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F"""\nGeneration: {generation}""" F"""\nTotal Population:{total_population}""" F"""\nBest score: {population_score[0][1]}""" F"""\nBest string: {population_score[0][0]}""" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. UpperCamelCase : Optional[Any] = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_snake_case ) # Normalize population score to be between 0 and 1. UpperCamelCase : Dict = [ (item, score / len(_snake_case )) for item, score in population_score ] # This is selection for i in range(_snake_case ): population.extend(select(population_score[int(_snake_case )] , _snake_case , _snake_case ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_snake_case ) > N_POPULATION: break if __name__ == "__main__": __lowerCamelCase : List[Any] = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) __lowerCamelCase : List[Any] = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\""" ) __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = basic(target_str, genes_list) print( f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
52
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase_ ( self : Dict ): _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCAmelCase : float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ) -> List[str]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: _A = ds['train'].train_test_split(data_args.train_val_split ) _A = split['train'] _A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTImageProcessor() # create model if model_args.model_name_or_path: _A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _A = ViTMAEForPreTraining(_snake_case ) if training_args.do_train: _A = ds['train'].column_names else: _A = ds['validation'].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = 'image' elif "img" in column_names: _A = 'img' else: _A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _A = image_processor.size['shortest_edge'] else: _A = (image_processor.size['height'], image_processor.size['width']) _A = Compose( [ Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_snake_case : List[Any] ): _A = [transforms(_snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Compute absolute learning rate _A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _A = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _A = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('eval' , _snake_case ) trainer.save_metrics('eval' , _snake_case ) # Write model card and (optionally) push to hub _A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
315
0
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase = 1_000 ) ->int: """simple docstring""" return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
243
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a = logging.getLogger(__name__) a = 50 # max width of layer names a = 70 # max width of quantizer names def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if args.calibrator == "max": _A = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _A = 'histogram' elif args.calibrator == "mse": _A = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]: '''simple docstring''' logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case ) if args.quant_disable: set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case ) if args.quant_disable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=_snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case ) if args.recalibrate_weights: recalibrate_weights(_snake_case ) if args.fuse_qkv: fuse_qkv(_snake_case , _snake_case ) if args.clip_gelu: clip_gelu(_snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str ) -> Any: '''simple docstring''' logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_snake_case , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(_snake_case , _snake_case , _snake_case ) qq._amax.fill_(_snake_case ) qk._amax.fill_(_snake_case ) qv._amax.fill_(_snake_case ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _snake_case ( _snake_case : List[str] ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _snake_case ( _snake_case : Dict ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(_snake_case , _snake_case ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(_snake_case , 'weight' ): continue _A = max(_snake_case , len(_snake_case ) ) for name, mod in model.named_modules(): _A = getattr(_snake_case , '_input_quantizer' , _snake_case ) _A = getattr(_snake_case , '_weight_quantizer' , _snake_case ) if not hasattr(_snake_case , 'weight' ): continue if type(_snake_case ) in ignore: continue if [True for s in ignore if type(_snake_case ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(_snake_case ) <= line_width: logger.info(_snake_case ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = getattr(_snake_case , _snake_case , _snake_case ) if quantizer_mod is not None: assert hasattr(_snake_case , _snake_case ) setattr(_snake_case , _snake_case , _snake_case ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case ) if which in ["weight", "both"]: set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case ) logger.info(_snake_case ) def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): set_quantizers(_snake_case , _snake_case , **_snake_case ) elif name.endswith('_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(_snake_case , _snake_case , _snake_case ) logger.info(_snake_case )
315
0
from collections.abc import Sequence def lowerCAmelCase_ ( snake_case_,snake_case_ ): return sum(c * (x**i) for i, c in enumerate(_snake_case ) ) def lowerCAmelCase_ ( snake_case_,snake_case_ ): _A : Any = 0.0 for coeff in reversed(_snake_case ): _A : Any = result * x + coeff return result if __name__ == "__main__": _snake_case = (0.0, 0.0, 5.0, 9.3, 7.0) _snake_case = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
26
"""simple docstring""" from scipy.stats import spearmanr import datasets a = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' a = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' a = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ): _A = spearmanr(_UpperCAmelCase , _UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
315
0
import os import sys import unittest _lowercase : str =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) _lowercase : Tuple =os.path.join("tests", "models", "bert", "test_modeling_bert.py") _lowercase : Tuple =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class snake_case__ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__( self ) -> str: """simple docstring""" a__ : str = get_test_to_tester_mapping(_UpperCAmelCase ) a__ : Dict = get_test_to_tester_mapping(_UpperCAmelCase ) a__ : List[str] = {"""BertModelTest""": """BertModelTester"""} a__ : Union[str, Any] = { """BlipModelTest""": """BlipModelTester""", """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""", """BlipTextModelTest""": """BlipTextModelTester""", """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""", """BlipVQAModelTest""": """BlipVQAModelTester""", """BlipVisionModelTest""": """BlipVisionModelTester""", } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__( self ) -> Dict: """simple docstring""" a__ : Optional[int] = get_model_to_test_mapping(_UpperCAmelCase ) a__ : List[Any] = get_model_to_test_mapping(_UpperCAmelCase ) a__ : Optional[Any] = { """BertForMaskedLM""": ["""BertModelTest"""], """BertForMultipleChoice""": ["""BertModelTest"""], """BertForNextSentencePrediction""": ["""BertModelTest"""], """BertForPreTraining""": ["""BertModelTest"""], """BertForQuestionAnswering""": ["""BertModelTest"""], """BertForSequenceClassification""": ["""BertModelTest"""], """BertForTokenClassification""": ["""BertModelTest"""], """BertLMHeadModel""": ["""BertModelTest"""], """BertModel""": ["""BertModelTest"""], } a__ : List[str] = { """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""], """BlipModel""": ["""BlipModelTest"""], """BlipTextModel""": ["""BlipTextModelTest"""], """BlipVisionModel""": ["""BlipVisionModelTest"""], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__( self ) -> Tuple: """simple docstring""" a__ : List[str] = get_model_to_tester_mapping(_UpperCAmelCase ) a__ : List[Any] = get_model_to_tester_mapping(_UpperCAmelCase ) a__ : Any = { """BertForMaskedLM""": ["""BertModelTester"""], """BertForMultipleChoice""": ["""BertModelTester"""], """BertForNextSentencePrediction""": ["""BertModelTester"""], """BertForPreTraining""": ["""BertModelTester"""], """BertForQuestionAnswering""": ["""BertModelTester"""], """BertForSequenceClassification""": ["""BertModelTester"""], """BertForTokenClassification""": ["""BertModelTester"""], """BertLMHeadModel""": ["""BertModelTester"""], """BertModel""": ["""BertModelTester"""], } a__ : List[Any] = { """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""], """BlipModel""": ["""BlipModelTester"""], """BlipTextModel""": ["""BlipTextModelTester"""], """BlipVisionModel""": ["""BlipVisionModelTester"""], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
170
"""simple docstring""" from collections.abc import Callable def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' _A = a _A = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: _A = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: _A = mid else: _A = mid _A = start + (end - start) / 2.0 return mid def _snake_case ( _snake_case : float ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
315
0
"""simple docstring""" from math import ceil, sqrt def A ( snake_case :int = 1_0_0_0_0_0_0 ) -> int: __UpperCamelCase = 0 for outer_width in range(3 , (limit // 4) + 2 ): if outer_width**2 > limit: __UpperCamelCase = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 ) else: __UpperCamelCase = 1 if (outer_width - hole_width_lower_bound) % 2: hole_width_lower_bound += 1 answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1 return answer if __name__ == "__main__": print(f'''{solution() = }''')
316
"""simple docstring""" def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int: __UpperCamelCase = range(1 , snake_case ) __UpperCamelCase = range(1 , snake_case ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'''{solution(1_0, 2_2) = }''')
316
1
"""simple docstring""" import torch from transformers import AutoModel class __lowerCAmelCase ( torch.nn.Module ): def __init__( self , __UpperCAmelCase="sayef/fsner-bert-base-uncased" ): '''simple docstring''' super(__UpperCAmelCase , self ).__init__() __UpperCamelCase = AutoModel.from_pretrained(__UpperCAmelCase , return_dict=__UpperCAmelCase ) __UpperCamelCase = torch.nn.CosineSimilarity(3 , 1E-08 ) __UpperCamelCase = torch.nn.Softmax(dim=1 ) def UpperCAmelCase ( self , **__UpperCAmelCase ): '''simple docstring''' return self.bert(**__UpperCAmelCase ).last_hidden_state def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return token_embeddings.sum(2 , keepdim=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1 ): '''simple docstring''' return self.softmax(T * self.cos(__UpperCAmelCase , __UpperCAmelCase ) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = W_supports['sizes'].tolist() __UpperCamelCase = W_supports['start_token_id'].item() __UpperCamelCase = W_supports['end_token_id'].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] __UpperCamelCase = self.BERT(**__UpperCAmelCase ) __UpperCamelCase = self.BERT(**__UpperCAmelCase ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = W_supports['input_ids'] == start_token_id __UpperCamelCase = W_supports['input_ids'] == end_token_id for i, size in enumerate(__UpperCAmelCase ): if i == 0: __UpperCamelCase = 0 else: __UpperCamelCase = support_sizes[i - 1] __UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]] __UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]] __UpperCamelCase = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) __UpperCamelCase = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: __UpperCamelCase = torch.vstack((p_starts, p_start) ) __UpperCamelCase = torch.vstack((p_ends, p_end) ) else: __UpperCamelCase = p_start __UpperCamelCase = p_end return p_starts, p_ends
316
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split() UpperCamelCase : Tuple = "|".join(sys.argv[1:]) UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''') UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
316
1
"""simple docstring""" import functools from typing import Any def A ( snake_case :str , snake_case :list[str] ) -> bool: # Validation if not isinstance(snake_case , snake_case ) or len(snake_case ) == 0: raise ValueError('the string should be not empty string' ) if not isinstance(snake_case , snake_case ) or not all( isinstance(snake_case , snake_case ) and len(snake_case ) > 0 for item in words ): raise ValueError('the words should be a list of non-empty strings' ) # Build trie __UpperCamelCase = {} __UpperCamelCase = 'WORD_KEEPER' for word in words: __UpperCamelCase = trie for c in word: if c not in trie_node: __UpperCamelCase = {} __UpperCamelCase = trie_node[c] __UpperCamelCase = True __UpperCamelCase = len(snake_case ) # Dynamic programming method @functools.cache def is_breakable(snake_case :int ) -> bool: if index == len_string: return True __UpperCamelCase = trie for i in range(snake_case , snake_case ): __UpperCamelCase = trie_node.get(string[i] , snake_case ) if trie_node is None: return False if trie_node.get(snake_case , snake_case ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
316
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCamelCase : Any = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = ["pixel_values"] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_pad __UpperCamelCase = pad_size def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase ) __UpperCamelCase = (old_height // size + 1) * size - old_height __UpperCamelCase = (old_width // size + 1) * size - old_width return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_pad if do_pad is not None else self.do_pad __UpperCamelCase = pad_size if pad_size is not None else self.pad_size __UpperCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_pad: __UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] __UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __UpperCamelCase = {'pixel_values': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
316
1
"""simple docstring""" import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCamelCase : Optional[int] = logging.get_logger(__name__) class __lowerCAmelCase ( enum.Enum ): lowercase = 0 lowercase = 1 @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "generated" def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = {} if truncation is not None: __UpperCamelCase = truncation __UpperCamelCase = generate_kwargs __UpperCamelCase = {} if return_tensors is not None and return_type is None: __UpperCamelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: __UpperCamelCase = return_type if clean_up_tokenization_spaces is not None: __UpperCamelCase = clean_up_tokenization_spaces if stop_sequence is not None: __UpperCamelCase = self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) if len(__UpperCAmelCase ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' ) __UpperCamelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return True def UpperCAmelCase ( self , *__UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.model.config.prefix if self.model.config.prefix is not None else '' if isinstance(args[0] , __UpperCAmelCase ): if self.tokenizer.pad_token_id is None: raise ValueError('Please make sure that the tokenizer has a pad_token_id when using a batch input' ) __UpperCamelCase = ([prefix + arg for arg in args[0]],) __UpperCamelCase = True elif isinstance(args[0] , __UpperCAmelCase ): __UpperCamelCase = (prefix + args[0],) __UpperCamelCase = False else: raise ValueError( F' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`' ) __UpperCamelCase = self.tokenizer(*__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = super().__call__(*__UpperCAmelCase , **__UpperCAmelCase ) if ( isinstance(args[0] , __UpperCAmelCase ) and all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for el in args[0] ) and all(len(__UpperCAmelCase ) == 1 for res in result ) ): return [res[0] for res in result] return result def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self._parse_and_tokenize(__UpperCAmelCase , truncation=__UpperCAmelCase , **__UpperCAmelCase ) return inputs def UpperCAmelCase ( self , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' if self.framework == "pt": __UpperCamelCase , __UpperCamelCase = model_inputs['input_ids'].shape elif self.framework == "tf": __UpperCamelCase , __UpperCamelCase = tf.shape(model_inputs['input_ids'] ).numpy() __UpperCamelCase = generate_kwargs.get('min_length' , self.model.config.min_length ) __UpperCamelCase = generate_kwargs.get('max_length' , self.model.config.max_length ) self.check_inputs(__UpperCAmelCase , generate_kwargs['min_length'] , generate_kwargs['max_length'] ) __UpperCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase ) __UpperCamelCase = output_ids.shape[0] if self.framework == "pt": __UpperCamelCase = output_ids.reshape(__UpperCAmelCase , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": __UpperCamelCase = tf.reshape(__UpperCAmelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=ReturnType.TEXT , __UpperCAmelCase=False ): '''simple docstring''' __UpperCamelCase = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: __UpperCamelCase = {F'{self.return_name}_token_ids': output_ids} elif return_type == ReturnType.TEXT: __UpperCamelCase = { F'{self.return_name}_text': self.tokenizer.decode( __UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , ) } records.append(__UpperCAmelCase ) return records @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "summary" def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if max_length < min_length: logger.warning(F'Your min_length={min_length} must be inferior than your max_length={max_length}.' ) if input_length < max_length: logger.warning( F'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is ' 'a summarization task, where outputs shorter than the input are typically wanted, you might ' F'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' ) @add_end_docstrings(__SCREAMING_SNAKE_CASE ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "translation" def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if input_length > 0.9 * max_length: logger.warning( F'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider ' 'increasing your max_length manually, e.g. translator(\'...\', max_length=400)' ) return True def UpperCAmelCase ( self , *__UpperCAmelCase , __UpperCAmelCase=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCAmelCase=None , __UpperCAmelCase=None ): '''simple docstring''' if getattr(self.tokenizer , '_build_translation_inputs' , __UpperCAmelCase ): return self.tokenizer._build_translation_inputs( *__UpperCAmelCase , return_tensors=self.framework , truncation=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase ) else: return super()._parse_and_tokenize(*__UpperCAmelCase , truncation=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = super()._sanitize_parameters(**__UpperCAmelCase ) if src_lang is not None: __UpperCamelCase = src_lang if tgt_lang is not None: __UpperCamelCase = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. __UpperCamelCase = kwargs.get('task' , self.task ) __UpperCamelCase = task.split('_' ) if task and len(__UpperCAmelCase ) == 4: # translation, XX, to YY __UpperCamelCase = items[1] __UpperCamelCase = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return super().__call__(*__UpperCAmelCase , **__UpperCAmelCase )
316
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = 13 __UpperCamelCase = 7 __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = 2 __UpperCamelCase = 99 __UpperCamelCase = 0 __UpperCamelCase = 32 __UpperCamelCase = 2 __UpperCamelCase = 4 __UpperCamelCase = 0.1 __UpperCamelCase = 0.1 __UpperCamelCase = 512 __UpperCamelCase = 16 __UpperCamelCase = 2 __UpperCamelCase = 0.0_2 __UpperCamelCase = 3 __UpperCamelCase = 4 __UpperCamelCase = 'last' __UpperCamelCase = True __UpperCamelCase = None __UpperCamelCase = 0 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) __UpperCamelCase = None if self.use_input_lengths: __UpperCamelCase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __UpperCamelCase = None if self.use_token_type_ids: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) __UpperCamelCase = [input_ids, input_mask] __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.num_choices __UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = config_and_inputs __UpperCamelCase = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'langs': token_type_ids, 'lengths': input_lengths, } return config, inputs_dict @require_tf class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) lowercase = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowercase = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) lowercase = False lowercase = False def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFFlaubertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 ) def UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase ) @slow def UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_tf @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' ) __UpperCamelCase = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" __UpperCamelCase = model(__UpperCAmelCase )[0] __UpperCamelCase = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , __UpperCAmelCase ) # compare the actual values for a slice. __UpperCamelCase = tf.convert_to_tensor( [ [ [-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8], [-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9], [-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
316
1
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = (UniPCMultistepScheduler,) lowercase = (("num_inference_steps", 25),) def UpperCAmelCase ( self , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = { 'num_train_timesteps': 1000, 'beta_start': 0.0_0_0_1, 'beta_end': 0.0_2, 'beta_schedule': 'linear', 'solver_order': 2, 'solver_type': 'bh2', } config.update(**__UpperCAmelCase ) return config def UpperCAmelCase ( self , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = dict(self.forward_default_kwargs ) __UpperCamelCase = kwargs.pop('num_inference_steps' , __UpperCAmelCase ) __UpperCamelCase = self.dummy_sample __UpperCamelCase = 0.1 * sample __UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: __UpperCamelCase = self.get_scheduler_config(**__UpperCAmelCase ) __UpperCamelCase = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals __UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCAmelCase ) __UpperCamelCase = scheduler_class.from_pretrained(__UpperCAmelCase ) new_scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals __UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order] __UpperCamelCase , __UpperCamelCase = sample, sample for t in range(__UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ): __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample __UpperCamelCase = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase ( self , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = dict(self.forward_default_kwargs ) __UpperCamelCase = kwargs.pop('num_inference_steps' , __UpperCAmelCase ) __UpperCamelCase = self.dummy_sample __UpperCamelCase = 0.1 * sample __UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: __UpperCamelCase = self.get_scheduler_config() __UpperCamelCase = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) __UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCAmelCase ) __UpperCamelCase = scheduler_class.from_pretrained(__UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) __UpperCamelCase = dummy_past_residuals[: new_scheduler.config.solver_order] __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample __UpperCamelCase = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase ( self , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' if scheduler is None: __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config(**__UpperCAmelCase ) __UpperCamelCase = scheduler_class(**__UpperCAmelCase ) __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config(**__UpperCAmelCase ) __UpperCamelCase = scheduler_class(**__UpperCAmelCase ) __UpperCamelCase = 10 __UpperCamelCase = self.dummy_model() __UpperCamelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): __UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample return sample def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = dict(self.forward_default_kwargs ) __UpperCamelCase = kwargs.pop('num_inference_steps' , __UpperCAmelCase ) for scheduler_class in self.scheduler_classes: __UpperCamelCase = self.get_scheduler_config() __UpperCamelCase = scheduler_class(**__UpperCAmelCase ) __UpperCamelCase = self.dummy_sample __UpperCamelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__UpperCAmelCase , 'set_timesteps' ): scheduler.set_timesteps(__UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , 'set_timesteps' ): __UpperCamelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] __UpperCamelCase = dummy_past_residuals[: scheduler.config.solver_order] __UpperCamelCase = scheduler.timesteps[5] __UpperCamelCase = scheduler.timesteps[6] __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = UniPCMultistepScheduler(**self.get_scheduler_config() ) __UpperCamelCase = self.full_loop(scheduler=__UpperCAmelCase ) __UpperCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 __UpperCamelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __UpperCamelCase = DEISMultistepScheduler.from_config(scheduler.config ) __UpperCamelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) __UpperCamelCase = UniPCMultistepScheduler.from_config(scheduler.config ) __UpperCamelCase = self.full_loop(scheduler=__UpperCAmelCase ) __UpperCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def UpperCAmelCase ( self ): '''simple docstring''' for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' self.check_over_configs(thresholding=__UpperCAmelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCAmelCase , prediction_type=__UpperCAmelCase , sample_max_value=__UpperCAmelCase , solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , ) def UpperCAmelCase ( self ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , ) __UpperCamelCase = self.full_loop( solver_order=__UpperCAmelCase , solver_type=__UpperCAmelCase , prediction_type=__UpperCAmelCase , ) assert not torch.isnan(__UpperCAmelCase ).any(), "Samples have nan numbers" def UpperCAmelCase ( self ): '''simple docstring''' self.check_over_configs(lower_order_final=__UpperCAmelCase ) self.check_over_configs(lower_order_final=__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=0 ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.full_loop() __UpperCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2_4_6_4 ) < 1E-3 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.full_loop(prediction_type='v_prediction' ) __UpperCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_mean.item() - 0.1_0_1_4 ) < 1E-3 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config(thresholding=__UpperCAmelCase , dynamic_thresholding_ratio=0 ) __UpperCamelCase = scheduler_class(**__UpperCAmelCase ) __UpperCamelCase = 10 __UpperCamelCase = self.dummy_model() __UpperCamelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): __UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample assert sample.dtype == torch.floataa def UpperCAmelCase ( self , **__UpperCAmelCase ): '''simple docstring''' for scheduler_class in self.scheduler_classes: __UpperCamelCase = self.get_scheduler_config(**__UpperCAmelCase ) __UpperCamelCase = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
316
"""simple docstring""" import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str: __UpperCamelCase = s.rsplit(snake_case , snake_case ) return new.join(snake_case ) def A ( snake_case :List[Any] ) -> int: # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def A ( snake_case :str ) -> Union[str, Any]: __UpperCamelCase = {} __UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4'] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' ) if "res_path" in key: __UpperCamelCase = key.replace('res_path.' , 'res_path.path.' ) if key.endswith('.w' ): __UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 ) if key.endswith('.b' ): __UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 ) __UpperCamelCase = value.float() return upgrade @torch.no_grad() def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int: from dall_e import Encoder __UpperCamelCase = Encoder() if os.path.exists(snake_case ): __UpperCamelCase = torch.load(snake_case ) else: __UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case ) if isinstance(snake_case , snake_case ): __UpperCamelCase = ckpt.state_dict() encoder.load_state_dict(snake_case ) if config_path is not None: __UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case ) else: __UpperCamelCase = FlavaImageCodebookConfig() __UpperCamelCase = FlavaImageCodebook(snake_case ).eval() __UpperCamelCase = encoder.state_dict() __UpperCamelCase = upgrade_state_dict(snake_case ) hf_model.load_state_dict(snake_case ) __UpperCamelCase = hf_model.state_dict() __UpperCamelCase = count_parameters(snake_case ) __UpperCamelCase = count_parameters(snake_case ) assert torch.allclose(snake_case , snake_case , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(snake_case ) else: return hf_state_dict if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") UpperCamelCase : int = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
316
1
"""simple docstring""" class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = len(__UpperCAmelCase ) __UpperCamelCase = [0] * len_array if len_array > 0: __UpperCamelCase = array[0] for i in range(1 , __UpperCAmelCase ): __UpperCamelCase = self.prefix_sum[i - 1] + array[i] def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if start == 0: return self.prefix_sum[end] return self.prefix_sum[end] - self.prefix_sum[start - 1] def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = {0} for sum_item in self.prefix_sum: if sum_item - target_sum in sums: return True sums.add(__UpperCAmelCase ) return False if __name__ == "__main__": import doctest doctest.testmod()
316
"""simple docstring""" import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings UpperCamelCase : str = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = super().to_dict() for k, v in d.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): __UpperCamelCase = v.to_dict() return d
316
1
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase : List[Any] = logging.get_logger(__name__) def A ( snake_case :List[Any] , snake_case :int , snake_case :Dict , snake_case :Optional[Any] ) -> Tuple: __UpperCamelCase = original_name.split('.' )[0] __UpperCamelCase = key.split('.' ) __UpperCamelCase = int(key_list[key_list.index(snake_case ) - 2] ) __UpperCamelCase = int(key_list[key_list.index(snake_case ) - 1] ) __UpperCamelCase = orig_block_num - offset __UpperCamelCase = key.replace(f'{orig_block_num}.{layer_num}.{original_name}' , f'block.{new_block_num}.{layer_num}.{new_name}' ) return key def A ( snake_case :Optional[int] ) -> List[Any]: __UpperCamelCase = OrderedDict() __UpperCamelCase , __UpperCamelCase = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): __UpperCamelCase = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 __UpperCamelCase = key[: key.find('proj' )] __UpperCamelCase = key.replace(snake_case , f'patch_embeddings.{total_embed_found}.' ) __UpperCamelCase = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: __UpperCamelCase = 'poolformer.encoder.' + key if "mlp.fc1" in key: __UpperCamelCase = replace_key_with_offset(snake_case , snake_case , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: __UpperCamelCase = replace_key_with_offset(snake_case , snake_case , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: __UpperCamelCase = replace_key_with_offset(snake_case , snake_case , 'norm1' , 'before_norm' ) if "norm2" in key: __UpperCamelCase = replace_key_with_offset(snake_case , snake_case , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: __UpperCamelCase = replace_key_with_offset(snake_case , snake_case , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: __UpperCamelCase = replace_key_with_offset(snake_case , snake_case , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: __UpperCamelCase = key.replace('head' , 'classifier' ) __UpperCamelCase = value return new_state_dict def A ( ) -> str: __UpperCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' __UpperCamelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw ) return image @torch.no_grad() def A ( snake_case :int , snake_case :str , snake_case :str ) -> int: __UpperCamelCase = PoolFormerConfig() # set attributes based on model_name __UpperCamelCase = 'huggingface/label-files' __UpperCamelCase = model_name[-3:] __UpperCamelCase = 1_0_0_0 __UpperCamelCase = 'imagenet-1k-id2label.json' __UpperCamelCase = (1, 1_0_0_0) # set config attributes __UpperCamelCase = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) ) __UpperCamelCase = {int(snake_case ): v for k, v in idalabel.items()} __UpperCamelCase = idalabel __UpperCamelCase = {v: k for k, v in idalabel.items()} if size == "s12": __UpperCamelCase = [2, 2, 6, 2] __UpperCamelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] __UpperCamelCase = 4.0 __UpperCamelCase = 0.9 elif size == "s24": __UpperCamelCase = [4, 4, 1_2, 4] __UpperCamelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] __UpperCamelCase = 4.0 __UpperCamelCase = 0.9 elif size == "s36": __UpperCamelCase = [6, 6, 1_8, 6] __UpperCamelCase = [6_4, 1_2_8, 3_2_0, 5_1_2] __UpperCamelCase = 4.0 __UpperCamelCase = 1e-6 __UpperCamelCase = 0.9 elif size == "m36": __UpperCamelCase = [6, 6, 1_8, 6] __UpperCamelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] __UpperCamelCase = 4.0 __UpperCamelCase = 1e-6 __UpperCamelCase = 0.95 elif size == "m48": __UpperCamelCase = [8, 8, 2_4, 8] __UpperCamelCase = [9_6, 1_9_2, 3_8_4, 7_6_8] __UpperCamelCase = 4.0 __UpperCamelCase = 1e-6 __UpperCamelCase = 0.95 else: raise ValueError(f'Size {size} not supported' ) # load image processor __UpperCamelCase = PoolFormerImageProcessor(crop_pct=snake_case ) # Prepare image __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=snake_case , return_tensors='pt' ).pixel_values logger.info(f'Converting model {model_name}...' ) # load original state dict __UpperCamelCase = torch.load(snake_case , map_location=torch.device('cpu' ) ) # rename keys __UpperCamelCase = rename_keys(snake_case ) # create HuggingFace model and load state dict __UpperCamelCase = PoolFormerForImageClassification(snake_case ) model.load_state_dict(snake_case ) model.eval() # Define image processor __UpperCamelCase = PoolFormerImageProcessor(crop_pct=snake_case ) __UpperCamelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass __UpperCamelCase = model(snake_case ) __UpperCamelCase = outputs.logits # define expected logit slices for different models if size == "s12": __UpperCamelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] ) elif size == "s24": __UpperCamelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] ) elif size == "s36": __UpperCamelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] ) elif size == "m36": __UpperCamelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] ) elif size == "m48": __UpperCamelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] ) else: raise ValueError(f'Size {size} not supported' ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , snake_case , atol=1e-2 ) # finally, save model and image processor logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(snake_case ).mkdir(exist_ok=snake_case ) model.save_pretrained(snake_case ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case ) if __name__ == "__main__": UpperCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( "--model_name", default="poolformer_s12", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) UpperCamelCase : Tuple = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
316
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar UpperCamelCase : List[str] = TypeVar("KEY") UpperCamelCase : List[str] = TypeVar("VAL") @dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE ) class __lowerCAmelCase ( Generic[KEY, VAL] ): lowercase = 42 lowercase = 42 class __lowerCAmelCase ( _Item ): def __init__( self ): '''simple docstring''' super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __bool__( self ): '''simple docstring''' return False UpperCamelCase : Any = _DeletedItem() class __lowerCAmelCase ( MutableMapping[KEY, VAL] ): def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ): '''simple docstring''' __UpperCamelCase = initial_block_size __UpperCamelCase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __UpperCamelCase = capacity_factor __UpperCamelCase = 0 def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return hash(__UpperCAmelCase ) % len(self._buckets ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return (ind + 1) % len(self._buckets ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self._buckets[ind] if not stored: __UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase ) self._len += 1 return True elif stored.key == key: __UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase ) return True else: return False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False __UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self._buckets __UpperCamelCase = [None] * new_size __UpperCamelCase = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def UpperCAmelCase ( self ): '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def UpperCAmelCase ( self ): '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self._get_bucket_index(__UpperCAmelCase ) for _ in range(len(self._buckets ) ): yield ind __UpperCamelCase = self._get_next_ind(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): break def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self._is_full(): self._size_up() self._add_item(__UpperCAmelCase , __UpperCAmelCase ) def __delitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): __UpperCamelCase = self._buckets[ind] if item is None: raise KeyError(__UpperCAmelCase ) if item is _deleted: continue if item.key == key: __UpperCamelCase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): __UpperCamelCase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__UpperCAmelCase ) def __len__( self ): '''simple docstring''' return self._len def __iter__( self ): '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self ): '''simple docstring''' __UpperCamelCase = ' ,'.join( F'{item.key}: {item.val}' for item in self._buckets if item ) return F'HashMap({val_string})'
316
1
"""simple docstring""" import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse("1.0.0a"): raise Exception("requires fairseq >= 1.0.0a") logging.set_verbosity_info() UpperCamelCase : Optional[Any] = logging.get_logger(__name__) UpperCamelCase : Optional[int] = "Hello world! cécé herlolip" def A ( snake_case :str , snake_case :str , snake_case :bool ) -> Any: __UpperCamelCase = FairseqRobertaModel.from_pretrained(snake_case ) roberta.eval() # disable dropout __UpperCamelCase = roberta.model.encoder.sentence_encoder __UpperCamelCase = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: __UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0] print('Our RoBERTa config:' , snake_case ) __UpperCamelCase = XLMRobertaXLForSequenceClassification(snake_case ) if classification_head else XLMRobertaXLForMaskedLM(snake_case ) model.eval() # Now let's copy all the weights. # Embeddings __UpperCamelCase = roberta_sent_encoder.embed_tokens.weight __UpperCamelCase = roberta_sent_encoder.embed_positions.weight __UpperCamelCase = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. __UpperCamelCase = roberta_sent_encoder.layer_norm.weight __UpperCamelCase = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer __UpperCamelCase = model.roberta.encoder.layer[i] __UpperCamelCase = roberta_sent_encoder.layers[i] __UpperCamelCase = layer.attention __UpperCamelCase = roberta_layer.self_attn_layer_norm.weight __UpperCamelCase = roberta_layer.self_attn_layer_norm.bias # self attention __UpperCamelCase = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) __UpperCamelCase = roberta_layer.self_attn.q_proj.weight __UpperCamelCase = roberta_layer.self_attn.q_proj.bias __UpperCamelCase = roberta_layer.self_attn.k_proj.weight __UpperCamelCase = roberta_layer.self_attn.k_proj.bias __UpperCamelCase = roberta_layer.self_attn.v_proj.weight __UpperCamelCase = roberta_layer.self_attn.v_proj.bias # self-attention output __UpperCamelCase = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape __UpperCamelCase = roberta_layer.self_attn.out_proj.weight __UpperCamelCase = roberta_layer.self_attn.out_proj.bias # this one is final layer norm __UpperCamelCase = roberta_layer.final_layer_norm.weight __UpperCamelCase = roberta_layer.final_layer_norm.bias # intermediate __UpperCamelCase = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape __UpperCamelCase = roberta_layer.fca.weight __UpperCamelCase = roberta_layer.fca.bias # output __UpperCamelCase = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape __UpperCamelCase = roberta_layer.fca.weight __UpperCamelCase = roberta_layer.fca.bias # end of layer if classification_head: __UpperCamelCase = roberta.model.classification_heads['mnli'].dense.weight __UpperCamelCase = roberta.model.classification_heads['mnli'].dense.bias __UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.weight __UpperCamelCase = roberta.model.classification_heads['mnli'].out_proj.bias else: # LM Head __UpperCamelCase = roberta.model.encoder.lm_head.dense.weight __UpperCamelCase = roberta.model.encoder.lm_head.dense.bias __UpperCamelCase = roberta.model.encoder.lm_head.layer_norm.weight __UpperCamelCase = roberta.model.encoder.lm_head.layer_norm.bias __UpperCamelCase = roberta.model.encoder.lm_head.weight __UpperCamelCase = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. __UpperCamelCase = roberta.encode(snake_case ).unsqueeze(0 ) # batch of size 1 __UpperCamelCase = model(snake_case )[0] if classification_head: __UpperCamelCase = roberta.model.classification_heads['mnli'](roberta.extract_features(snake_case ) ) else: __UpperCamelCase = roberta.model(snake_case )[0] print(our_output.shape , their_output.shape ) __UpperCamelCase = torch.max(torch.abs(our_output - their_output ) ).item() print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7 __UpperCamelCase = torch.allclose(snake_case , snake_case , atol=1e-3 ) print('Do both models output the same tensors?' , '🔥' if success else '💩' ) if not success: raise Exception('Something went wRoNg' ) pathlib.Path(snake_case ).mkdir(parents=snake_case , exist_ok=snake_case ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) if __name__ == "__main__": UpperCamelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--classification_head", action="store_true", help="Whether to convert a final classification head." ) UpperCamelCase : str = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
316
"""simple docstring""" def A ( snake_case :int , snake_case :int ) -> bool: return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
316
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Optional[Any] = logging.get_logger(__name__) UpperCamelCase : Optional[Any] = { "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json", } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "mgp-str" def __init__( self , __UpperCAmelCase=[32, 128] , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=27 , __UpperCAmelCase=38 , __UpperCAmelCase=5_0257 , __UpperCAmelCase=3_0522 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=0.0_2 , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCamelCase = image_size __UpperCamelCase = patch_size __UpperCamelCase = num_channels __UpperCamelCase = max_token_length __UpperCamelCase = num_character_labels __UpperCamelCase = num_bpe_labels __UpperCamelCase = num_wordpiece_labels __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = mlp_ratio __UpperCamelCase = distilled __UpperCamelCase = layer_norm_eps __UpperCamelCase = drop_rate __UpperCamelCase = qkv_bias __UpperCamelCase = attn_drop_rate __UpperCamelCase = drop_path_rate __UpperCamelCase = output_aa_attentions __UpperCamelCase = initializer_range
316
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = 42 lowercase = 42 def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) @torch.no_grad() def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 2000 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.unet.config.sample_size __UpperCamelCase = (batch_size, 3, img_size, img_size) __UpperCamelCase = self.unet __UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma __UpperCamelCase = sample.to(self.device ) self.scheduler.set_timesteps(__UpperCAmelCase ) self.scheduler.set_sigmas(__UpperCAmelCase ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): __UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): __UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample __UpperCamelCase = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # prediction step __UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ).sample __UpperCamelCase = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ) __UpperCamelCase , __UpperCamelCase = output.prev_sample, output.prev_sample_mean __UpperCamelCase = sample_mean.clamp(0 , 1 ) __UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __UpperCamelCase = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (sample,) return ImagePipelineOutput(images=__UpperCAmelCase )
316
1
"""simple docstring""" import numpy as np def A ( snake_case :np.array ) -> np.array: return (2 / (1 + np.exp(-2 * vector ))) - 1 if __name__ == "__main__": import doctest doctest.testmod()
316
"""simple docstring""" def A ( snake_case :list[int] , snake_case :int ) -> bool: __UpperCamelCase = len(snake_case ) __UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): __UpperCamelCase = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): __UpperCamelCase = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: __UpperCamelCase = subset[i - 1][j] if arr[i - 1] <= j: __UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
316
1
"""simple docstring""" def A ( snake_case :int ) -> bool: if num < 0: return False __UpperCamelCase = num __UpperCamelCase = 0 while num > 0: __UpperCamelCase = rev_num * 1_0 + (num % 1_0) num //= 1_0 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
316
"""simple docstring""" import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCamelCase : Union[str, Any] = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") UpperCamelCase : int = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeqaSeqLM, "translation": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCamelCase : Optional[Any] = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCamelCase : str = sorted(arg_to_scheduler.keys()) UpperCamelCase : List[str] = "{" + ", ".join(arg_to_scheduler_choices) + "}" class __lowerCAmelCase ( pl.LightningModule ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(__UpperCAmelCase ) __UpperCamelCase = 0 __UpperCamelCase = Path(self.hparams.output_dir ) __UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __UpperCamelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , ) else: __UpperCamelCase = config __UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ): assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) ) if tokenizer is None: __UpperCamelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , ) else: __UpperCamelCase = tokenizer __UpperCamelCase = MODEL_MODES[mode] if model is None: __UpperCamelCase = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , ) else: __UpperCamelCase = model def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler] __UpperCamelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model __UpperCamelCase = ['bias', 'LayerNorm.weight'] __UpperCamelCase = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __UpperCamelCase = Adafactor( __UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase ) else: __UpperCamelCase = AdamW( __UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __UpperCamelCase = optimizer __UpperCamelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return self.validation_step(__UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.validation_end(__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' if stage == "test": __UpperCamelCase = len(self.test_dataloader().dataset ) else: __UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase ) __UpperCamelCase = len(self.train_dataloader().dataset ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' raise NotImplementedError('You must implement this for your task' ) def UpperCAmelCase ( self ): '''simple docstring''' return self.train_loader def UpperCAmelCase ( self ): '''simple docstring''' return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( __UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.output_dir.joinpath('best_tfmr' ) __UpperCamelCase = self.step_count self.model.save_pretrained(__UpperCAmelCase ) self.tokenizer.save_pretrained(__UpperCAmelCase ) @staticmethod def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' parser.add_argument( '--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase ) parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase ) parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase ) parser.add_argument('--adafactor' , action='store_true' ) class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(__UpperCAmelCase ) class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = trainer.lr_schedulers[0]['scheduler'] __UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' rank_zero_info('***** Validation results *****' ) __UpperCamelCase = trainer.callback_metrics # Log results for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' rank_zero_info('***** Test results *****' ) __UpperCamelCase = trainer.callback_metrics # Log and save results to file __UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(__UpperCAmelCase , 'w' ) as writer: for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) def A ( snake_case :Any , snake_case :int ) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '--output_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'model_checkpoints' ) , type=snake_case , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=snake_case , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=snake_case ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=snake_case , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=snake_case , default=4_2 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'dummy-train-data' ) , type=snake_case , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , ) def A ( snake_case :BaseTransformer , snake_case :argparse.Namespace , snake_case :Union[str, Any]=None , snake_case :Union[str, Any]=True , snake_case :Any=[] , snake_case :Tuple=None , snake_case :List[str]=None , **snake_case :Union[str, Any] , ) -> Optional[int]: pl.seed_everything(args.seed ) # init model __UpperCamelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=snake_case ) # add custom checkpoints if checkpoint_callback is None: __UpperCamelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(snake_case ) if logging_callback is None: __UpperCamelCase = LoggingCallback() __UpperCamelCase = {} if args.fpaa: __UpperCamelCase = 1_6 if args.gpus > 1: __UpperCamelCase = 'auto' __UpperCamelCase = 'ddp' __UpperCamelCase = args.accumulate_grad_batches __UpperCamelCase = None __UpperCamelCase = 'auto' __UpperCamelCase = pl.Trainer.from_argparse_args( snake_case , weights_summary=snake_case , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=snake_case , val_check_interval=1 , num_sanity_val_steps=2 , **snake_case , ) if args.do_train: trainer.fit(snake_case ) else: print('RAG modeling tests with new set functions successfuly executed!' ) return trainer
316
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCamelCase : Dict = logging.get_logger(__name__) UpperCamelCase : int = "▁" UpperCamelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} UpperCamelCase : Dict = { "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } UpperCamelCase : List[Any] = {"vinai/bartpho-syllable": 1_0_2_4} class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ["input_ids", "attention_mask"] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token __UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) __UpperCamelCase = vocab_file __UpperCamelCase = monolingual_vocab_file __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility __UpperCamelCase = {} __UpperCamelCase = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: __UpperCamelCase = cnt cnt += 1 with open(__UpperCAmelCase , 'r' , encoding='utf-8' ) as f: for line in f.readlines(): __UpperCamelCase = line.strip().split()[0] __UpperCamelCase = len(self.fairseq_tokens_to_ids ) if str(__UpperCAmelCase ) not in self.fairseq_tokens_to_ids: __UpperCamelCase = len(self.fairseq_tokens_to_ids ) __UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): '''simple docstring''' __UpperCamelCase = self.__dict__.copy() __UpperCamelCase = None __UpperCamelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __UpperCamelCase = {} __UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCamelCase = [self.cls_token_id] __UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase = [self.sep_token_id] __UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCAmelCase ( self ): '''simple docstring''' return len(self.fairseq_ids_to_tokens ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.fairseq_ids_to_tokens[index] def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = ''.join(__UpperCAmelCase ).replace(__UpperCAmelCase , ' ' ).strip() return out_string def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if not os.path.isdir(__UpperCAmelCase ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return __UpperCamelCase = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __UpperCamelCase = os.path.join( __UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , 'wb' ) as fi: __UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __UpperCAmelCase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__UpperCAmelCase , 'w' , encoding='utf-8' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F'{str(__UpperCAmelCase )} \n' ) return out_vocab_file, out_monolingual_vocab_file
316
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase : Dict = { "vocab_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", }, "merges_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", }, "tokenizer_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json", }, } UpperCamelCase : Dict = { "gpt2": 1_0_2_4, "gpt2-medium": 1_0_2_4, "gpt2-large": 1_0_2_4, "gpt2-xl": 1_0_2_4, "distilgpt2": 1_0_2_4, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ["input_ids", "attention_mask"] lowercase = GPTaTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase ) __UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space: __UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) ) __UpperCamelCase = add_prefix_space __UpperCamelCase = pre_tok_class(**__UpperCAmelCase ) __UpperCamelCase = add_prefix_space def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] ) if len(__UpperCAmelCase ) > self.model_max_length: __UpperCamelCase = input_ids[-self.model_max_length :] return input_ids
316
1
"""simple docstring""" import argparse import json from tqdm import tqdm def A ( ) -> List[str]: __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '--src_path' , type=snake_case , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , ) parser.add_argument( '--evaluation_set' , type=snake_case , help='where to store parsed evaluation_set file' , ) parser.add_argument( '--gold_data_path' , type=snake_case , help='where to store parsed gold_data_path file' , ) __UpperCamelCase = parser.parse_args() with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open( args.gold_data_path , 'w' ) as gold_file: __UpperCamelCase = json.load(snake_case ) for dpr_record in tqdm(snake_case ): __UpperCamelCase = dpr_record['question'] __UpperCamelCase = [context['title'] for context in dpr_record['positive_ctxs']] eval_file.write(question + '\n' ) gold_file.write('\t'.join(snake_case ) + '\n' ) if __name__ == "__main__": main()
316
"""simple docstring""" import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def A ( snake_case :str , snake_case :tuple , snake_case :Path , snake_case :Dict , snake_case :int , snake_case :List[str] , snake_case :Union[str, Any] , snake_case :Union[str, Any]=False , ) -> str: output_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , use_external_data_format=snake_case , enable_onnx_checker=snake_case , opset_version=snake_case , ) else: export( snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , opset_version=snake_case , ) @torch.no_grad() def A ( snake_case :str , snake_case :str , snake_case :int , snake_case :bool = False ) -> List[str]: __UpperCamelCase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): __UpperCamelCase = 'cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: __UpperCamelCase = 'cpu' __UpperCamelCase = Path(snake_case ) # VAE DECODER __UpperCamelCase = AutoencoderKL.from_pretrained(model_path + '/vae' ) __UpperCamelCase = vae_decoder.config.latent_channels # forward only through the decoder part __UpperCamelCase = vae_decoder.decode onnx_export( snake_case , model_args=( torch.randn(1 , snake_case , 2_5 , 2_5 ).to(device=snake_case , dtype=snake_case ), False, ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=snake_case , ) del vae_decoder if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=1_4, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") UpperCamelCase : List[Any] = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
316
1
"""simple docstring""" import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = ConsistencyModelPipeline lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt lowercase = frozenset( [ "num_inference_steps", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) @property def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet' , ) return unet @property def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , ) return unet def UpperCAmelCase ( self , __UpperCAmelCase=False ): '''simple docstring''' if class_cond: __UpperCamelCase = self.dummy_cond_unet else: __UpperCamelCase = self.dummy_uncond_unet # Default to CM multistep sampler __UpperCamelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __UpperCamelCase = { 'unet': unet, 'scheduler': scheduler, } return components def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' if str(__UpperCAmelCase ).startswith('mps' ): __UpperCamelCase = torch.manual_seed(__UpperCAmelCase ) else: __UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __UpperCamelCase = { 'batch_size': 1, 'num_inference_steps': None, 'timesteps': [22, 0], 'generator': generator, 'output_type': 'np', } return inputs def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = ConsistencyModelPipeline(**__UpperCAmelCase ) __UpperCamelCase = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __UpperCamelCase = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 32, 32, 3) __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.get_dummy_components(class_cond=__UpperCAmelCase ) __UpperCamelCase = ConsistencyModelPipeline(**__UpperCAmelCase ) __UpperCamelCase = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __UpperCamelCase = 0 __UpperCamelCase = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 32, 32, 3) __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = ConsistencyModelPipeline(**__UpperCAmelCase ) __UpperCamelCase = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __UpperCamelCase = 1 __UpperCamelCase = None __UpperCamelCase = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 32, 32, 3) __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.get_dummy_components(class_cond=__UpperCAmelCase ) __UpperCamelCase = ConsistencyModelPipeline(**__UpperCAmelCase ) __UpperCamelCase = pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCamelCase = self.get_dummy_inputs(__UpperCAmelCase ) __UpperCamelCase = 1 __UpperCamelCase = None __UpperCamelCase = 0 __UpperCamelCase = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 32, 32, 3) __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): def UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase=False , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=(1, 3, 64, 64) ): '''simple docstring''' __UpperCamelCase = torch.manual_seed(__UpperCAmelCase ) __UpperCamelCase = { 'num_inference_steps': None, 'timesteps': [22, 0], 'class_labels': 0, 'generator': generator, 'output_type': 'np', } if get_fixed_latents: __UpperCamelCase = self.get_fixed_latents(seed=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase , shape=__UpperCAmelCase ) __UpperCamelCase = latents return inputs def UpperCAmelCase ( self , __UpperCAmelCase=0 , __UpperCAmelCase="cpu" , __UpperCAmelCase=torch.floataa , __UpperCAmelCase=(1, 3, 64, 64) ): '''simple docstring''' if type(__UpperCAmelCase ) == str: __UpperCamelCase = torch.device(__UpperCAmelCase ) __UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase ) return latents def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) __UpperCamelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __UpperCamelCase = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pipe.to(torch_device=__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCamelCase = self.get_inputs() __UpperCamelCase = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 64, 64, 3) __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) __UpperCamelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __UpperCamelCase = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pipe.to(torch_device=__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCamelCase = self.get_inputs() __UpperCamelCase = 1 __UpperCamelCase = None __UpperCamelCase = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 64, 64, 3) __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 @require_torch_a def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) __UpperCamelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __UpperCamelCase = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pipe.to(torch_device=__UpperCAmelCase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCamelCase = self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ): __UpperCamelCase = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 64, 64, 3) __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @require_torch_a def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) __UpperCamelCase = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , ) __UpperCamelCase = ConsistencyModelPipeline(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) pipe.to(torch_device=__UpperCAmelCase , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) __UpperCamelCase = self.get_inputs(get_fixed_latents=__UpperCAmelCase , device=__UpperCAmelCase ) __UpperCamelCase = 1 __UpperCamelCase = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__UpperCAmelCase , enable_math=__UpperCAmelCase , enable_mem_efficient=__UpperCAmelCase ): __UpperCamelCase = pipe(**__UpperCAmelCase ).images assert image.shape == (1, 64, 64, 3) __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
316
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A ( snake_case :list[int] , snake_case :tuple[int, ...] ) -> str | None: __UpperCamelCase = "" __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 for keychar, cipherchar in zip(cycle(snake_case ) , snake_case ): __UpperCamelCase = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case ) return decoded def A ( snake_case :list[int] ) -> list[str]: __UpperCamelCase = [] for key in product(snake_case , repeat=3 ): __UpperCamelCase = try_key(snake_case , snake_case ) if encoded is not None: possibles.append(snake_case ) return possibles def A ( snake_case :list[str] , snake_case :str ) -> list[str]: return [possible for possible in possibles if common_word in possible.lower()] def A ( snake_case :str = "p059_cipher.txt" ) -> int: __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = Path(snake_case ).parent.joinpath(snake_case ).read_text(encoding='utf-8' ) __UpperCamelCase = [int(snake_case ) for number in data.strip().split(',' )] __UpperCamelCase = filter_valid_chars(snake_case ) for common_word in COMMON_WORDS: __UpperCamelCase = filter_common_word(snake_case , snake_case ) if len(snake_case ) == 1: break __UpperCamelCase = possibles[0] return sum(ord(snake_case ) for char in decoded_text ) if __name__ == "__main__": print(f'''{solution() = }''')
316
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Optional[int] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "megatron-bert" def __init__( self , __UpperCAmelCase=2_9056 , __UpperCAmelCase=1024 , __UpperCAmelCase=24 , __UpperCAmelCase=16 , __UpperCAmelCase=4096 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-12 , __UpperCAmelCase=0 , __UpperCAmelCase="absolute" , __UpperCAmelCase=True , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = use_cache
316
"""simple docstring""" UpperCamelCase : dict[str, float] = { "km/h": 1.0, "m/s": 3.6, "mph": 1.60_93_44, "knot": 1.8_52, } UpperCamelCase : dict[str, float] = { "km/h": 1.0, "m/s": 0.2_77_77_77_78, "mph": 0.6_21_37_11_92, "knot": 0.5_39_95_68_03, } def A ( snake_case :float , snake_case :str , snake_case :str ) -> float: if unit_to not in speed_chart or unit_from not in speed_chart_inverse: __UpperCamelCase = ( f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n' f'Valid values are: {", ".join(snake_case )}' ) raise ValueError(snake_case ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
316
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : List[str] = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { "tanreinama/GPTSAN-2.8B-spout_is_uniform": ( "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json" ), } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "gptsan-japanese" lowercase = [ "past_key_values", ] lowercase = { "hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , __UpperCAmelCase=3_6000 , __UpperCAmelCase=1280 , __UpperCAmelCase=1024 , __UpperCAmelCase=8192 , __UpperCAmelCase=4096 , __UpperCAmelCase=128 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase=16 , __UpperCAmelCase=16 , __UpperCAmelCase=128 , __UpperCAmelCase=0.0 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=False , __UpperCAmelCase=0.0 , __UpperCAmelCase="float32" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.0_0_2 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3_5998 , __UpperCAmelCase=3_5995 , __UpperCAmelCase=3_5999 , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = vocab_size __UpperCamelCase = max_position_embeddings __UpperCamelCase = d_model __UpperCamelCase = d_ff __UpperCamelCase = d_ext __UpperCamelCase = d_spout __UpperCamelCase = num_switch_layers __UpperCamelCase = num_ext_layers __UpperCamelCase = num_switch_layers + num_ext_layers __UpperCamelCase = num_heads __UpperCamelCase = num_experts __UpperCamelCase = expert_capacity __UpperCamelCase = dropout_rate __UpperCamelCase = layer_norm_epsilon __UpperCamelCase = router_bias __UpperCamelCase = router_jitter_noise __UpperCamelCase = router_dtype __UpperCamelCase = router_ignore_padding_tokens __UpperCamelCase = output_hidden_states __UpperCamelCase = output_attentions __UpperCamelCase = initializer_factor __UpperCamelCase = output_router_logits __UpperCamelCase = use_cache super().__init__( separator_token_id=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
316
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = IFInpaintingSuperResolutionPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} ) lowercase = PipelineTesterMixin.required_optional_params - {"latents"} def UpperCAmelCase ( self ): '''simple docstring''' return self._get_superresolution_dummy_components() def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' if str(__UpperCAmelCase ).startswith('mps' ): __UpperCamelCase = torch.manual_seed(__UpperCAmelCase ) else: __UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __UpperCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) __UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) __UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) __UpperCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def UpperCAmelCase ( self ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_save_load_local() def UpperCAmelCase ( self ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
316
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCamelCase : int = { "configuration_swiftformer": [ "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwiftFormerConfig", "SwiftFormerOnnxConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[Any] = [ "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "SwiftFormerForImageClassification", "SwiftFormerModel", "SwiftFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
316
"""simple docstring""" def A ( snake_case :int ) -> int: __UpperCamelCase = [1] __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0 __UpperCamelCase = ugly_nums[ia] * 2 __UpperCamelCase = ugly_nums[ia] * 3 __UpperCamelCase = ugly_nums[ia] * 5 for _ in range(1 , snake_case ): __UpperCamelCase = min(snake_case , snake_case , snake_case ) ugly_nums.append(snake_case ) if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f'''{ugly_numbers(2_0_0) = }''')
316
1
"""simple docstring""" from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=2 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=36 , __UpperCAmelCase=2 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=6 , __UpperCAmelCase=6 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , __UpperCAmelCase=1000 , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = num_channels __UpperCamelCase = image_size __UpperCamelCase = patch_size __UpperCamelCase = is_training __UpperCamelCase = use_input_mask __UpperCamelCase = use_token_type_ids __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = type_sequence_label_size __UpperCamelCase = initializer_range __UpperCamelCase = coordinate_size __UpperCamelCase = shape_size __UpperCamelCase = num_labels __UpperCamelCase = num_choices __UpperCamelCase = scope __UpperCamelCase = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __UpperCamelCase = text_seq_length __UpperCamelCase = (image_size // patch_size) ** 2 + 1 __UpperCamelCase = self.text_seq_length + self.image_seq_length def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) __UpperCamelCase = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __UpperCamelCase = bbox[i, j, 3] __UpperCamelCase = bbox[i, j, 1] __UpperCamelCase = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: __UpperCamelCase = bbox[i, j, 2] __UpperCamelCase = bbox[i, j, 0] __UpperCamelCase = tmp_coordinate __UpperCamelCase = tf.constant(__UpperCAmelCase ) __UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.text_seq_length] ) __UpperCamelCase = None if self.use_token_type_ids: __UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __UpperCamelCase = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = TFLayoutLMvaModel(config=__UpperCAmelCase ) # text + image __UpperCamelCase = model(__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase ) __UpperCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , training=__UpperCAmelCase , ) __UpperCamelCase = model(__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __UpperCamelCase = model(__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __UpperCamelCase = model({'pixel_values': pixel_values} , training=__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = TFLayoutLMvaForSequenceClassification(config=__UpperCAmelCase ) __UpperCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = TFLayoutLMvaForTokenClassification(config=__UpperCAmelCase ) __UpperCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = 2 __UpperCamelCase = TFLayoutLMvaForQuestionAnswering(config=__UpperCAmelCase ) __UpperCamelCase = model( __UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , training=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs __UpperCamelCase = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_tf class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = ( ( TFLayoutLMvaModel, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, ) if is_tf_available() else () ) lowercase = ( {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel} if is_tf_available() else {} ) lowercase = False lowercase = False lowercase = False def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return True def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): '''simple docstring''' __UpperCamelCase = copy.deepcopy(__UpperCAmelCase ) if model_class in get_values(__UpperCAmelCase ): __UpperCamelCase = { k: tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) ) if isinstance(__UpperCAmelCase , tf.Tensor ) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__UpperCAmelCase ): __UpperCamelCase = tf.ones(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCAmelCase ): __UpperCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) __UpperCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCAmelCase ): __UpperCamelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) elif model_class in get_values(__UpperCAmelCase ): __UpperCamelCase = tf.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa ) return inputs_dict def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFLayoutLMvaModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(__UpperCAmelCase ) if getattr(__UpperCAmelCase , 'hf_compute_loss' , __UpperCAmelCase ): # The number of elements in the loss should be the same as the number of elements in the label __UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __UpperCamelCase = prepared_for_class[ sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__UpperCAmelCase )[0] ] __UpperCamelCase = added_label.shape.as_list()[:1] # Test that model correctly compute the loss with kwargs __UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __UpperCamelCase = prepared_for_class.pop('input_ids' ) __UpperCamelCase = model(__UpperCAmelCase , **__UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss when we mask some positions __UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __UpperCamelCase = prepared_for_class.pop('input_ids' ) if "labels" in prepared_for_class: __UpperCamelCase = prepared_for_class['labels'].numpy() if len(labels.shape ) > 1 and labels.shape[1] != 1: __UpperCamelCase = -100 __UpperCamelCase = tf.convert_to_tensor(__UpperCAmelCase ) __UpperCamelCase = model(__UpperCAmelCase , **__UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) ) # Test that model correctly compute the loss with a dict __UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __UpperCamelCase = model(__UpperCAmelCase )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) # Test that model correctly compute the loss with a tuple __UpperCamelCase = self._prepare_for_class(inputs_dict.copy() , __UpperCAmelCase , return_labels=__UpperCAmelCase ) # Get keys that were added with the _prepare_for_class function __UpperCamelCase = prepared_for_class.keys() - inputs_dict.keys() __UpperCamelCase = inspect.signature(model.call ).parameters __UpperCamelCase = list(signature.keys() ) # Create a dictionary holding the location of the tensors in the tuple __UpperCamelCase = {0: 'input_ids'} for label_key in label_keys: __UpperCamelCase = signature_names.index(__UpperCAmelCase ) __UpperCamelCase = label_key __UpperCamelCase = sorted(tuple_index_mapping.items() ) # Initialize a list with their default values, update the values and convert to a tuple __UpperCamelCase = [] for name in signature_names: if name != "kwargs": list_input.append(signature[name].default ) for index, value in sorted_tuple_index_mapping: __UpperCamelCase = prepared_for_class[value] __UpperCamelCase = tuple(__UpperCAmelCase ) # Send to model __UpperCamelCase = model(tuple_input[:-1] )[0] self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] ) def UpperCAmelCase ( self ): '''simple docstring''' ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCamelCase = type self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @slow def UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = TFLayoutLMvaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def A ( ) -> List[str]: __UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf class __lowerCAmelCase ( unittest.TestCase ): @cached_property def UpperCAmelCase ( self ): '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__UpperCAmelCase ) if is_vision_available() else None @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ) __UpperCamelCase = self.default_image_processor __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='tf' ).pixel_values __UpperCamelCase = tf.constant([[1, 2]] ) __UpperCamelCase = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 ) # forward pass __UpperCamelCase = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase , pixel_values=__UpperCAmelCase , training=__UpperCAmelCase ) # verify the logits __UpperCamelCase = (1, 199, 768) self.assertEqual(outputs.last_hidden_state.shape , __UpperCAmelCase ) __UpperCamelCase = tf.constant( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
316
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = ["image_processor", "tokenizer"] lowercase = "OwlViTImageProcessor" lowercase = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , __UpperCAmelCase , ) __UpperCamelCase = kwargs.pop('feature_extractor' ) __UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( 'You have to specify at least one text or query image or image. All three cannot be none.' ) if text is not None: if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )): __UpperCamelCase = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )] elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ): __UpperCamelCase = [] # Maximum number of queries across batch __UpperCamelCase = max([len(__UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__UpperCAmelCase ) != max_num_queries: __UpperCamelCase = t + [' '] * (max_num_queries - len(__UpperCAmelCase )) __UpperCamelCase = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) encodings.append(__UpperCAmelCase ) else: raise TypeError('Input text should be a string, a list of strings or a nested list of strings' ) if return_tensors == "np": __UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 ) __UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 ) __UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 ) __UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 ) __UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 ) else: raise ValueError('Target return tensor type could not be returned' ) __UpperCamelCase = BatchEncoding() __UpperCamelCase = input_ids __UpperCamelCase = attention_mask if query_images is not None: __UpperCamelCase = BatchEncoding() __UpperCamelCase = self.image_processor( __UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values __UpperCamelCase = query_pixel_values if images is not None: __UpperCamelCase = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: __UpperCamelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: __UpperCamelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def UpperCAmelCase ( self ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , ) return self.image_processor_class @property def UpperCAmelCase ( self ): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , ) return self.image_processor
316
1
"""simple docstring""" import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging UpperCamelCase : int = logging.get_logger(__name__) UpperCamelCase : List[Any] = { "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json", # See all BART models at https://huggingface.co/models?filter=bart } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "bart" lowercase = ["past_key_values"] lowercase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = vocab_size __UpperCamelCase = max_position_embeddings __UpperCamelCase = d_model __UpperCamelCase = encoder_ffn_dim __UpperCamelCase = encoder_layers __UpperCamelCase = encoder_attention_heads __UpperCamelCase = decoder_ffn_dim __UpperCamelCase = decoder_layers __UpperCamelCase = decoder_attention_heads __UpperCamelCase = dropout __UpperCamelCase = attention_dropout __UpperCamelCase = activation_dropout __UpperCamelCase = activation_function __UpperCamelCase = init_std __UpperCamelCase = encoder_layerdrop __UpperCamelCase = decoder_layerdrop __UpperCamelCase = classifier_dropout __UpperCamelCase = use_cache __UpperCamelCase = encoder_layers __UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , __UpperCAmelCase ): __UpperCamelCase = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' 'The config can simply be saved and uploaded again to be fixed.' ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): @property def UpperCAmelCase ( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCamelCase = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __UpperCamelCase = {0: 'batch'} __UpperCamelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __UpperCamelCase = {0: 'batch', 1: 'decoder_sequence'} __UpperCamelCase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='inputs' ) elif self.task == "causal-lm": # TODO: figure this case out. __UpperCamelCase = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ] ) if self.use_past: __UpperCamelCase , __UpperCamelCase = self.num_layers for i in range(__UpperCAmelCase ): __UpperCamelCase = {0: 'batch', 2: 'past_sequence + sequence'} __UpperCamelCase = {0: 'batch', 2: 'past_sequence + sequence'} else: __UpperCamelCase = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ] ) return common_inputs @property def UpperCAmelCase ( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCamelCase = super().outputs else: __UpperCamelCase = super(__UpperCAmelCase , self ).outputs if self.use_past: __UpperCamelCase , __UpperCamelCase = self.num_layers for i in range(__UpperCAmelCase ): __UpperCamelCase = {0: 'batch', 2: 'past_sequence + sequence'} __UpperCamelCase = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): '''simple docstring''' __UpperCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs __UpperCamelCase = seq_length if not self.use_past else 1 __UpperCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} __UpperCamelCase = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape __UpperCamelCase = common_inputs['decoder_input_ids'].shape[1] __UpperCamelCase , __UpperCamelCase = self.num_attention_heads __UpperCamelCase = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __UpperCamelCase = decoder_seq_length + 3 __UpperCamelCase = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __UpperCamelCase = torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) __UpperCamelCase = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __UpperCamelCase , __UpperCamelCase = self.num_layers __UpperCamelCase = min(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers __UpperCamelCase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. __UpperCamelCase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): '''simple docstring''' __UpperCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __UpperCamelCase , __UpperCamelCase = common_inputs['input_ids'].shape # Not using the same length for past_key_values __UpperCamelCase = seqlen + 2 __UpperCamelCase , __UpperCamelCase = self.num_layers __UpperCamelCase , __UpperCamelCase = self.num_attention_heads __UpperCamelCase = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __UpperCamelCase = common_inputs['attention_mask'].dtype __UpperCamelCase = torch.cat( [common_inputs['attention_mask'], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) __UpperCamelCase = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): '''simple docstring''' __UpperCamelCase = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __UpperCamelCase = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) __UpperCamelCase = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence __UpperCamelCase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size __UpperCamelCase = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCamelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": __UpperCamelCase = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: __UpperCamelCase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCamelCase = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: __UpperCamelCase = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
316
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_input_mask __UpperCamelCase = use_token_type_ids __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = rotary_dim __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = initializer_range __UpperCamelCase = None __UpperCamelCase = vocab_size - 1 __UpperCamelCase = vocab_size - 1 __UpperCamelCase = vocab_size - 1 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs __UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = 20 __UpperCamelCase = model_class_name(__UpperCAmelCase ) __UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase ) __UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __UpperCamelCase = model( input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , ) __UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __UpperCamelCase = model( input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , ) __UpperCamelCase = model(__UpperCAmelCase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = 20 __UpperCamelCase = model_class_name(__UpperCAmelCase ) __UpperCamelCase = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __UpperCamelCase = model( input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , ) __UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __UpperCamelCase = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , ) __UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) @require_flax class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else () def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = FlaxGPTJModelTester(self ) def UpperCAmelCase ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @tooslow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) __UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase ) __UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) __UpperCamelCase = False __UpperCamelCase = model.config.eos_token_id __UpperCamelCase = jax.jit(model.generate ) __UpperCamelCase = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences __UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __UpperCamelCase = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @is_pt_flax_cross_test def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape __UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__UpperCAmelCase ): __UpperCamelCase = 0 __UpperCamelCase = 1 __UpperCamelCase = 0 __UpperCamelCase = 1 __UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval() __UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa ) __UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase ) __UpperCamelCase = fx_state with torch.no_grad(): __UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple() __UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__UpperCAmelCase ) __UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) __UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple() self.assertEqual( len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval() __UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa ) __UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params ) __UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape __UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__UpperCAmelCase ): __UpperCamelCase = 0 __UpperCamelCase = 1 __UpperCamelCase = 0 __UpperCamelCase = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): __UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple() __UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__UpperCAmelCase ) __UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase ) with torch.no_grad(): __UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple() self.assertEqual( len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def UpperCAmelCase ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) __UpperCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(__UpperCAmelCase )
316
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = 13 __UpperCamelCase = 7 __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = 2 __UpperCamelCase = 99 __UpperCamelCase = 0 __UpperCamelCase = 32 __UpperCamelCase = 2 __UpperCamelCase = 4 __UpperCamelCase = 0.1 __UpperCamelCase = 0.1 __UpperCamelCase = 512 __UpperCamelCase = 16 __UpperCamelCase = 2 __UpperCamelCase = 0.0_2 __UpperCamelCase = 3 __UpperCamelCase = 4 __UpperCamelCase = 'last' __UpperCamelCase = True __UpperCamelCase = None __UpperCamelCase = 0 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) __UpperCamelCase = None if self.use_input_lengths: __UpperCamelCase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __UpperCamelCase = None if self.use_token_type_ids: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) __UpperCamelCase = [input_ids, input_mask] __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.num_choices __UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = config_and_inputs __UpperCamelCase = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'langs': token_type_ids, 'lengths': input_lengths, } return config, inputs_dict @require_tf class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) lowercase = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowercase = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) lowercase = False lowercase = False def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFFlaubertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 ) def UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase ) @slow def UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_tf @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' ) __UpperCamelCase = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" __UpperCamelCase = model(__UpperCAmelCase )[0] __UpperCamelCase = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , __UpperCAmelCase ) # compare the actual values for a slice. __UpperCamelCase = tf.convert_to_tensor( [ [ [-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8], [-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9], [-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
316
"""simple docstring""" def A ( snake_case :list[int] , snake_case :list[int] ) -> None: __UpperCamelCase = len(snake_case ) print('The following activities are selected:' ) # The first activity is always selected __UpperCamelCase = 0 print(snake_case , end=',' ) # Consider rest of the activities for j in range(snake_case ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(snake_case , end=',' ) __UpperCamelCase = j if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase : int = [1, 3, 0, 5, 8, 5] UpperCamelCase : str = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
316
1
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version UpperCamelCase : Optional[Any] = get_logger(__name__) class __lowerCAmelCase : lowercase = "dummy_data" lowercase = "datasets" lowercase = False def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ): '''simple docstring''' __UpperCamelCase = 0 __UpperCamelCase = dataset_name __UpperCamelCase = cache_dir __UpperCamelCase = use_local_dummy_data __UpperCamelCase = config # download_callbacks take a single url as input __UpperCamelCase = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root __UpperCamelCase = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general __UpperCamelCase = str(__UpperCAmelCase ) # to be downloaded __UpperCamelCase = None __UpperCamelCase = None @property def UpperCAmelCase ( self ): '''simple docstring''' if self._dummy_file is None: __UpperCamelCase = self.download_dummy_data() return self._dummy_file @property def UpperCAmelCase ( self ): '''simple docstring''' if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('dummy' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('dummy' , self.version_name ) @property def UpperCAmelCase ( self ): '''simple docstring''' return os.path.join(self.dummy_data_folder , 'dummy_data.zip' ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) __UpperCamelCase = cached_path( __UpperCAmelCase , cache_dir=self.cache_dir , extract_compressed_file=__UpperCAmelCase , force_extract=__UpperCAmelCase ) return os.path.join(__UpperCAmelCase , self.dummy_file_name ) @property def UpperCAmelCase ( self ): '''simple docstring''' return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def UpperCAmelCase ( self ): '''simple docstring''' if self._bucket_url is None: __UpperCamelCase = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) ) return self._bucket_url @property def UpperCAmelCase ( self ): '''simple docstring''' if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] ) def UpperCAmelCase ( self , __UpperCAmelCase , *__UpperCAmelCase ): '''simple docstring''' if self.load_existing_dummy_data: # dummy data is downloaded and tested __UpperCamelCase = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned __UpperCamelCase = self.dummy_file_name # special case when data_url is a dict if isinstance(__UpperCAmelCase , __UpperCAmelCase ): return self.create_dummy_data_dict(__UpperCAmelCase , __UpperCAmelCase ) elif isinstance(__UpperCAmelCase , (list, tuple) ): return self.create_dummy_data_list(__UpperCAmelCase , __UpperCAmelCase ) else: return self.create_dummy_data_single(__UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , *__UpperCAmelCase ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return self.download_and_extract(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return path def UpperCAmelCase ( self ): '''simple docstring''' return {} def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__UpperCAmelCase , __UpperCAmelCase ): for single_url in single_urls: download_callback(__UpperCAmelCase ) else: __UpperCamelCase = single_urls download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__UpperCAmelCase , __UpperCAmelCase ): __UpperCamelCase = [os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) for x in single_urls] else: __UpperCamelCase = single_urls __UpperCamelCase = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(Path(__UpperCAmelCase ).name ) ) __UpperCamelCase = value # make sure that values are unique if all(isinstance(__UpperCAmelCase , __UpperCAmelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique __UpperCamelCase = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one __UpperCamelCase = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , __UpperCAmelCase ) ) for url in data_url ) __UpperCamelCase = all( url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): __UpperCamelCase = [data_url[0]] * len(__UpperCAmelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __UpperCamelCase = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(single_url.split('/' )[-1] ) ) dummy_data_list.append(__UpperCAmelCase ) return dummy_data_list def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for download_callback in self.download_callbacks: download_callback(__UpperCAmelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus __UpperCamelCase = os.path.join(__UpperCAmelCase , urllib.parse.quote_plus(data_url.split('/' )[-1] ) ) if os.path.exists(__UpperCAmelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def UpperCAmelCase ( self ): '''simple docstring''' pass def UpperCAmelCase ( self ): '''simple docstring''' pass def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' def _iter_archive_members(__UpperCAmelCase ): # this preserves the order of the members inside the ZIP archive __UpperCamelCase = Path(self.dummy_file ).parent __UpperCamelCase = path.relative_to(__UpperCAmelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: __UpperCamelCase = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__UpperCAmelCase ) __UpperCamelCase = Path(__UpperCAmelCase ) __UpperCamelCase = _iter_archive_members(__UpperCAmelCase ) if self.use_local_dummy_data else path.rglob('*' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('.', '__') ): yield file_path.relative_to(__UpperCAmelCase ).as_posix(), file_path.open('rb' ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): __UpperCamelCase = [paths] for path in paths: if os.path.isfile(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith(('.', '__') ): return yield path else: for dirpath, dirnames, filenames in os.walk(__UpperCAmelCase ): if os.path.basename(__UpperCAmelCase ).startswith(('.', '__') ): continue dirnames.sort() for filename in sorted(__UpperCAmelCase ): if filename.startswith(('.', '__') ): continue yield os.path.join(__UpperCAmelCase , __UpperCAmelCase )
316
"""simple docstring""" def A ( snake_case :int ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('The given input must be positive' ) # get the generated string sequence __UpperCamelCase = gray_code_sequence_string(snake_case ) # # convert them to integers for i in range(len(snake_case ) ): __UpperCamelCase = int(sequence[i] , 2 ) return sequence def A ( snake_case :int ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __UpperCamelCase = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __UpperCamelCase = gray_code_sequence_string(bit_count - 1 ) __UpperCamelCase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __UpperCamelCase = '0' + smaller_sequence[i] sequence.append(snake_case ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __UpperCamelCase = '1' + smaller_sequence[i] sequence.append(snake_case ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
316
1
"""simple docstring""" def A ( snake_case :Optional[Any] ) -> List[Any]: if not head: return True # split the list to two parts __UpperCamelCase , __UpperCamelCase = head.next, head while fast and fast.next: __UpperCamelCase = fast.next.next __UpperCamelCase = slow.next __UpperCamelCase = slow.next __UpperCamelCase = None # Don't forget here! But forget still works! # reverse the second part __UpperCamelCase = None while second: __UpperCamelCase = second.next __UpperCamelCase = node __UpperCamelCase = second __UpperCamelCase = nxt # compare two parts # second part has the same or one less node while node: if node.val != head.val: return False __UpperCamelCase = node.next __UpperCamelCase = head.next return True def A ( snake_case :int ) -> Tuple: if not head or not head.next: return True # 1. Get the midpoint (slow) __UpperCamelCase = __UpperCamelCase = __UpperCamelCase = head while fast and fast.next: __UpperCamelCase , __UpperCamelCase = fast.next.next, slow.next # 2. Push the second half into the stack __UpperCamelCase = [slow.val] while slow.next: __UpperCamelCase = slow.next stack.append(slow.val ) # 3. Comparison while stack: if stack.pop() != cur.val: return False __UpperCamelCase = cur.next return True def A ( snake_case :Optional[int] ) -> List[str]: if not head or not head.next: return True __UpperCamelCase = {} __UpperCamelCase = 0 while head: if head.val in d: d[head.val].append(snake_case ) else: __UpperCamelCase = [pos] __UpperCamelCase = head.next pos += 1 __UpperCamelCase = pos - 1 __UpperCamelCase = 0 for v in d.values(): if len(snake_case ) % 2 != 0: middle += 1 else: __UpperCamelCase = 0 for i in range(0 , len(snake_case ) ): if v[i] + v[len(snake_case ) - 1 - step] != checksum: return False step += 1 if middle > 1: return False return True
316
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = 100 __UpperCamelCase = batch_size __UpperCamelCase = image_size __UpperCamelCase = patch_size __UpperCamelCase = num_channels __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = type_sequence_label_size __UpperCamelCase = initializer_range __UpperCamelCase = scope __UpperCamelCase = out_indices __UpperCamelCase = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __UpperCamelCase = (image_size // patch_size) ** 2 __UpperCamelCase = num_patches + 1 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __UpperCamelCase = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCAmelCase ( self ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = BeitModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.type_sequence_label_size __UpperCamelCase = BeitForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __UpperCamelCase = 1 __UpperCamelCase = BeitForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) __UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs __UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowercase = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='BEiT does not use inputs_embeds' ) def UpperCAmelCase ( self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def UpperCAmelCase ( self ): '''simple docstring''' pass def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(__UpperCAmelCase ) __UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCamelCase = [*signature.parameters.keys()] __UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' if not self.model_tester.is_training: return __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]: continue __UpperCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() __UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __UpperCamelCase = model(**__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __UpperCamelCase = False __UpperCamelCase = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue __UpperCamelCase = model_class(__UpperCAmelCase ) model.gradient_checkpointing_enable() model.to(__UpperCAmelCase ) model.train() __UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __UpperCamelCase = model(**__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = _config_zero_init(__UpperCAmelCase ) for model_class in self.all_model_classes: __UpperCamelCase = model_class(config=__UpperCAmelCase ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def UpperCAmelCase ( self ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def A ( ) -> int: __UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def UpperCAmelCase ( self ): '''simple docstring''' return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase ) __UpperCamelCase = self.default_image_processor __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase ) # prepare bool_masked_pos __UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase ) __UpperCamelCase = outputs.logits # verify the logits __UpperCamelCase = torch.Size((1, 196, 8192) ) self.assertEqual(logits.shape , __UpperCAmelCase ) __UpperCamelCase = torch.tensor( [[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase ) __UpperCamelCase = self.default_image_processor __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCamelCase = model(**__UpperCAmelCase ) __UpperCamelCase = outputs.logits # verify the logits __UpperCamelCase = torch.Size((1, 1000) ) self.assertEqual(logits.shape , __UpperCAmelCase ) __UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) ) __UpperCamelCase = 281 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to( __UpperCAmelCase ) __UpperCamelCase = self.default_image_processor __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCamelCase = model(**__UpperCAmelCase ) __UpperCamelCase = outputs.logits # verify the logits __UpperCamelCase = torch.Size((1, 2_1841) ) self.assertEqual(logits.shape , __UpperCAmelCase ) __UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) ) __UpperCamelCase = 2396 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) __UpperCamelCase = model.to(__UpperCAmelCase ) __UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase ) __UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) __UpperCamelCase = Image.open(ds[0]['file'] ) __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCamelCase = model(**__UpperCAmelCase ) __UpperCamelCase = outputs.logits # verify the logits __UpperCamelCase = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , __UpperCAmelCase ) __UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' ) if is_pillow_less_than_a: __UpperCamelCase = torch.tensor( [ [[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]], [[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]], [[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]], ] , device=__UpperCAmelCase , ) else: __UpperCamelCase = torch.tensor( [ [[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]], [[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]], [[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]], ] , device=__UpperCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) __UpperCamelCase = model.to(__UpperCAmelCase ) __UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase ) __UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) __UpperCamelCase = Image.open(ds[0]['file'] ) __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCamelCase = model(**__UpperCAmelCase ) __UpperCamelCase = outputs.logits.detach().cpu() __UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] ) __UpperCamelCase = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , __UpperCAmelCase ) __UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase ) __UpperCamelCase = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
316
1
"""simple docstring""" def A ( snake_case :list[int] , snake_case :int ) -> bool: __UpperCamelCase = len(snake_case ) __UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): __UpperCamelCase = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): __UpperCamelCase = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: __UpperCamelCase = subset[i - 1][j] if arr[i - 1] <= j: __UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
316
"""simple docstring""" def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int: __UpperCamelCase = range(1 , snake_case ) __UpperCamelCase = range(1 , snake_case ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'''{solution(1_0, 2_2) = }''')
316
1
"""simple docstring""" def A ( snake_case :list , snake_case :list , snake_case :int ) -> int: if len(snake_case ) != len(snake_case ): raise ValueError('The length of profit and weight must be same.' ) if max_weight <= 0: raise ValueError('max_weight must greater than zero.' ) if any(p < 0 for p in profit ): raise ValueError('Profit can not be negative.' ) if any(w < 0 for w in weight ): raise ValueError('Weight can not be negative.' ) # List created to store profit gained for the 1kg in case of each weight # respectively. Calculate and append profit/weight for each element. __UpperCamelCase = [p / w for p, w in zip(snake_case , snake_case )] # Creating a copy of the list and sorting profit/weight in ascending order __UpperCamelCase = sorted(snake_case ) # declaring useful variables __UpperCamelCase = len(snake_case ) __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = 0 # loop till the total weight do not reach max limit e.g. 15 kg and till i<length while limit <= max_weight and i < length: # flag value for encountered greatest element in sorted_profit_by_weight __UpperCamelCase = sorted_profit_by_weight[length - i - 1] __UpperCamelCase = profit_by_weight.index(snake_case ) __UpperCamelCase = -1 # check if the weight encountered is less than the total weight # encountered before. if max_weight - limit >= weight[index]: limit += weight[index] # Adding profit gained for the given weight 1 === # weight[index]/weight[index] gain += 1 * profit[index] else: # Since the weight encountered is greater than limit, therefore take the # required number of remaining kgs and calculate profit for it. # weight remaining / weight[index] gain += (max_weight - limit) / weight[index] * profit[index] break i += 1 return gain if __name__ == "__main__": print( "Input profits, weights, and then max_weight (all positive ints) separated by " "spaces." ) UpperCamelCase : str = [int(x) for x in input("Input profits separated by spaces: ").split()] UpperCamelCase : str = [int(x) for x in input("Input weights separated by spaces: ").split()] UpperCamelCase : str = int(input("Max weight allowed: ")) # Function Call calc_profit(profit, weight, max_weight)
316
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split() UpperCamelCase : Tuple = "|".join(sys.argv[1:]) UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''') UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
316
1
"""simple docstring""" def A ( snake_case :list[int] ) -> int: if not numbers: return 0 if not isinstance(snake_case , (list, tuple) ) or not all( isinstance(snake_case , snake_case ) for number in numbers ): raise ValueError('numbers must be an iterable of integers' ) __UpperCamelCase = __UpperCamelCase = __UpperCamelCase = numbers[0] for i in range(1 , len(snake_case ) ): # update the maximum and minimum subarray products __UpperCamelCase = numbers[i] if number < 0: __UpperCamelCase , __UpperCamelCase = min_till_now, max_till_now __UpperCamelCase = max(snake_case , max_till_now * number ) __UpperCamelCase = min(snake_case , min_till_now * number ) # update the maximum product found till now __UpperCamelCase = max(snake_case , snake_case ) return max_prod
316
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCamelCase : Any = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = ["pixel_values"] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_pad __UpperCamelCase = pad_size def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase ) __UpperCamelCase = (old_height // size + 1) * size - old_height __UpperCamelCase = (old_width // size + 1) * size - old_width return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_pad if do_pad is not None else self.do_pad __UpperCamelCase = pad_size if pad_size is not None else self.pad_size __UpperCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_pad: __UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] __UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __UpperCamelCase = {'pixel_values': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
316
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Optional[Any] = logging.get_logger(__name__) UpperCamelCase : Dict = { "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json", } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "mra" def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=1 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-5 , __UpperCAmelCase="absolute" , __UpperCAmelCase=4 , __UpperCAmelCase="full" , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) __UpperCamelCase = vocab_size __UpperCamelCase = max_position_embeddings __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = initializer_range __UpperCamelCase = type_vocab_size __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = block_per_row __UpperCamelCase = approx_mode __UpperCamelCase = initial_prior_first_n_blocks __UpperCamelCase = initial_prior_diagonal_n_blocks
316
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = 13 __UpperCamelCase = 7 __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = 2 __UpperCamelCase = 99 __UpperCamelCase = 0 __UpperCamelCase = 32 __UpperCamelCase = 2 __UpperCamelCase = 4 __UpperCamelCase = 0.1 __UpperCamelCase = 0.1 __UpperCamelCase = 512 __UpperCamelCase = 16 __UpperCamelCase = 2 __UpperCamelCase = 0.0_2 __UpperCamelCase = 3 __UpperCamelCase = 4 __UpperCamelCase = 'last' __UpperCamelCase = True __UpperCamelCase = None __UpperCamelCase = 0 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) __UpperCamelCase = None if self.use_input_lengths: __UpperCamelCase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __UpperCamelCase = None if self.use_token_type_ids: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) __UpperCamelCase = [input_ids, input_mask] __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.num_choices __UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = config_and_inputs __UpperCamelCase = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'langs': token_type_ids, 'lengths': input_lengths, } return config, inputs_dict @require_tf class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) lowercase = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowercase = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) lowercase = False lowercase = False def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFFlaubertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 ) def UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase ) @slow def UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_tf @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' ) __UpperCamelCase = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" __UpperCamelCase = model(__UpperCAmelCase )[0] __UpperCamelCase = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , __UpperCAmelCase ) # compare the actual values for a slice. __UpperCamelCase = tf.convert_to_tensor( [ [ [-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8], [-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9], [-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
316
1
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCamelCase : Dict = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class __lowerCAmelCase ( datasets.BuilderConfig ): lowercase = None def A ( snake_case :"pyspark.sql.DataFrame" , snake_case :List[int] , ) -> int: import pyspark def generate_fn(): __UpperCamelCase = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: __UpperCamelCase = df_with_partition_id.select('*' ).where(f'part_id = {partition_id}' ).drop('part_id' ) __UpperCamelCase = partition_df.collect() __UpperCamelCase = 0 for row in rows: yield f'{partition_id}_{row_id}', row.asDict() row_id += 1 return generate_fn class __lowerCAmelCase ( _BaseExamplesIterable ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , ): '''simple docstring''' __UpperCamelCase = df __UpperCamelCase = partition_order or range(self.df.rdd.getNumPartitions() ) __UpperCamelCase = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ): '''simple docstring''' yield from self.generate_examples_fn() def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(__UpperCAmelCase ) return SparkExamplesIterable(self.df , partition_order=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.split_shard_indices_by_worker(__UpperCAmelCase , __UpperCAmelCase ) return SparkExamplesIterable(self.df , partition_order=__UpperCAmelCase ) @property def UpperCAmelCase ( self ): '''simple docstring''' return len(self.partition_order ) class __lowerCAmelCase ( datasets.DatasetBuilder ): lowercase = SparkConfig def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' import pyspark __UpperCamelCase = pyspark.sql.SparkSession.builder.getOrCreate() __UpperCamelCase = df __UpperCamelCase = working_dir super().__init__( cache_dir=__UpperCAmelCase , config_name=str(self.df.semanticHash() ) , **__UpperCAmelCase , ) def UpperCAmelCase ( self ): '''simple docstring''' def create_cache_and_write_probe(__UpperCAmelCase ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=__UpperCAmelCase ) __UpperCamelCase = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(__UpperCAmelCase , 'a' ) return [probe_file] if self._spark.conf.get('spark.master' , '' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: __UpperCamelCase = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__UpperCAmelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def UpperCAmelCase ( self ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' import pyspark def get_arrow_batch_size(__UpperCAmelCase ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) __UpperCamelCase = self.df.count() __UpperCamelCase = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. __UpperCamelCase = ( self.df.limit(__UpperCAmelCase ) .repartition(1 ) .mapInArrow(__UpperCAmelCase , 'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) __UpperCamelCase = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. __UpperCamelCase = min(__UpperCAmelCase , int(approx_total_size / max_shard_size ) ) __UpperCamelCase = self.df.repartition(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' import pyspark __UpperCamelCase = ParquetWriter if file_format == 'parquet' else ArrowWriter __UpperCamelCase = os.path.join(self._working_dir , os.path.basename(__UpperCAmelCase ) ) if self._working_dir else fpath __UpperCamelCase = file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. __UpperCamelCase = self.config.features __UpperCamelCase = self._writer_batch_size __UpperCamelCase = self._fs.storage_options def write_arrow(__UpperCAmelCase ): # Within the same SparkContext, no two task attempts will share the same attempt ID. __UpperCamelCase = pyspark.TaskContext().taskAttemptId() __UpperCamelCase = next(__UpperCAmelCase , __UpperCAmelCase ) if first_batch is None: # Some partitions might not receive any data. return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , ) __UpperCamelCase = 0 __UpperCamelCase = writer_class( features=__UpperCAmelCase , path=working_fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , writer_batch_size=__UpperCAmelCase , storage_options=__UpperCAmelCase , embed_local_files=__UpperCAmelCase , ) __UpperCamelCase = pa.Table.from_batches([first_batch] ) writer.write_table(__UpperCAmelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: __UpperCamelCase , __UpperCamelCase = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) shard_id += 1 __UpperCamelCase = writer_class( features=writer._features , path=working_fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , writer_batch_size=__UpperCAmelCase , storage_options=__UpperCAmelCase , embed_local_files=__UpperCAmelCase , ) __UpperCamelCase = pa.Table.from_batches([batch] ) writer.write_table(__UpperCAmelCase ) if writer._num_bytes > 0: __UpperCamelCase , __UpperCamelCase = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(__UpperCAmelCase ) ): __UpperCamelCase = os.path.join(os.path.dirname(__UpperCAmelCase ) , os.path.basename(__UpperCAmelCase ) ) shutil.move(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = ( self.df.mapInArrow(__UpperCAmelCase , 'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "arrow" , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ): '''simple docstring''' self._validate_cache_dir() __UpperCamelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(__UpperCAmelCase ) __UpperCamelCase = not is_remote_filesystem(self._fs ) __UpperCamelCase = os.path.join if is_local else posixpath.join __UpperCamelCase = '-TTTTT-SSSSS-of-NNNNN' __UpperCamelCase = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}' __UpperCamelCase = path_join(self._output_dir , __UpperCAmelCase ) __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = [] __UpperCamelCase = [] for task_id, content in self._prepare_split_single(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(__UpperCAmelCase ) __UpperCamelCase = total_num_examples __UpperCamelCase = total_num_bytes # should rename everything at the end logger.debug(F'Renaming {total_shards} shards.' ) if total_shards > 1: __UpperCamelCase = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. __UpperCamelCase = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): rename( __UpperCAmelCase , fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , fpath.replace('TTTTT-SSSSS' , F'{global_shard_id:05d}' ).replace('NNNNN' , F'{total_shards:05d}' ) , ) __UpperCamelCase = [] __UpperCamelCase = 0 for i in range(len(__UpperCAmelCase ) ): __UpperCamelCase , __UpperCamelCase = task_id_and_num_shards[i] for shard_id in range(__UpperCAmelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(__UpperCAmelCase , len(__UpperCAmelCase ) ).map(lambda __UpperCAmelCase : _rename_shard(*__UpperCAmelCase ) ).collect() else: # don't use any pattern __UpperCamelCase = 0 __UpperCamelCase = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' , F'{shard_id:05d}' ).replace('TTTTT' , F'{task_id:05d}' ) , fpath.replace(__UpperCAmelCase , '' ) , ) def UpperCAmelCase ( self , __UpperCAmelCase , ): '''simple docstring''' return SparkExamplesIterable(self.df )
316
"""simple docstring""" import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str: __UpperCamelCase = s.rsplit(snake_case , snake_case ) return new.join(snake_case ) def A ( snake_case :List[Any] ) -> int: # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def A ( snake_case :str ) -> Union[str, Any]: __UpperCamelCase = {} __UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4'] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' ) if "res_path" in key: __UpperCamelCase = key.replace('res_path.' , 'res_path.path.' ) if key.endswith('.w' ): __UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 ) if key.endswith('.b' ): __UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 ) __UpperCamelCase = value.float() return upgrade @torch.no_grad() def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int: from dall_e import Encoder __UpperCamelCase = Encoder() if os.path.exists(snake_case ): __UpperCamelCase = torch.load(snake_case ) else: __UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case ) if isinstance(snake_case , snake_case ): __UpperCamelCase = ckpt.state_dict() encoder.load_state_dict(snake_case ) if config_path is not None: __UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case ) else: __UpperCamelCase = FlavaImageCodebookConfig() __UpperCamelCase = FlavaImageCodebook(snake_case ).eval() __UpperCamelCase = encoder.state_dict() __UpperCamelCase = upgrade_state_dict(snake_case ) hf_model.load_state_dict(snake_case ) __UpperCamelCase = hf_model.state_dict() __UpperCamelCase = count_parameters(snake_case ) __UpperCamelCase = count_parameters(snake_case ) assert torch.allclose(snake_case , snake_case , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(snake_case ) else: return hf_state_dict if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") UpperCamelCase : int = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
316
1
"""simple docstring""" import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=[0, 1, 2, 3] , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=[1, 384, 24, 24] , __UpperCAmelCase=True , __UpperCAmelCase=None , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = image_size __UpperCamelCase = patch_size __UpperCamelCase = num_channels __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = backbone_out_indices __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = initializer_range __UpperCamelCase = num_labels __UpperCamelCase = backbone_featmap_shape __UpperCamelCase = scope __UpperCamelCase = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) __UpperCamelCase = (image_size // patch_size) ** 2 __UpperCamelCase = num_patches + 1 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __UpperCamelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [96, 192, 384, 768], 'num_groups': 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = DPTModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = DPTForDepthEstimation(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = DPTForSemanticSegmentation(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs __UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () lowercase = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = DPTModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='DPT does not use inputs_embeds' ) def UpperCAmelCase ( self ): '''simple docstring''' pass def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(__UpperCAmelCase ) __UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCamelCase = [*signature.parameters.keys()] __UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = True if model_class in get_values(__UpperCAmelCase ): continue __UpperCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() __UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __UpperCamelCase = model(**__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase ( self ): '''simple docstring''' for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = False __UpperCamelCase = True if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing: continue __UpperCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.gradient_checkpointing_enable() model.train() __UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __UpperCamelCase = model(**__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = _config_zero_init(__UpperCAmelCase ) for model_class in self.all_model_classes: __UpperCamelCase = model_class(config=__UpperCAmelCase ) # Skip the check for the backbone __UpperCamelCase = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": __UpperCamelCase = [F'{name}.{key}' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def UpperCAmelCase ( self ): '''simple docstring''' pass @slow def UpperCAmelCase ( self ): '''simple docstring''' for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: __UpperCamelCase = DPTModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = 'add' with self.assertRaises(__UpperCAmelCase ): __UpperCamelCase = DPTForDepthEstimation(__UpperCAmelCase ) def A ( ) -> List[Any]: __UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision @slow class __lowerCAmelCase ( unittest.TestCase ): def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' ) __UpperCamelCase = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(__UpperCAmelCase ) __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCamelCase = model(**__UpperCAmelCase ) __UpperCamelCase = outputs.predicted_depth # verify the predicted depth __UpperCamelCase = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , __UpperCAmelCase ) __UpperCamelCase = torch.tensor( [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCAmelCase , atol=1E-4 ) )
316
"""simple docstring""" import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings UpperCamelCase : str = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = super().to_dict() for k, v in d.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): __UpperCamelCase = v.to_dict() return d
316
1
"""simple docstring""" import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def A ( snake_case :List[Any] ) -> Tuple: __UpperCamelCase = [ 'decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(snake_case , snake_case ) def A ( snake_case :Any ) -> Optional[int]: __UpperCamelCase , __UpperCamelCase = emb.weight.shape __UpperCamelCase = nn.Linear(snake_case , snake_case , bias=snake_case ) __UpperCamelCase = emb.weight.data return lin_layer def A ( snake_case :Tuple ) -> Tuple: __UpperCamelCase = torch.load(snake_case , map_location='cpu' ) __UpperCamelCase = Namespace(**checkpoint['cfg']['model'] ) __UpperCamelCase = checkpoint['model'] remove_ignore_keys_(snake_case ) __UpperCamelCase = state_dict['decoder.embed_tokens.weight'].shape[0] __UpperCamelCase = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()} __UpperCamelCase = XGLMConfig( vocab_size=snake_case , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) __UpperCamelCase = XGLMForCausalLM(snake_case ) __UpperCamelCase = model.load_state_dict(snake_case , strict=snake_case ) print(snake_case ) __UpperCamelCase = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") UpperCamelCase : str = parser.parse_args() UpperCamelCase : int = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
316
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar UpperCamelCase : List[str] = TypeVar("KEY") UpperCamelCase : List[str] = TypeVar("VAL") @dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE ) class __lowerCAmelCase ( Generic[KEY, VAL] ): lowercase = 42 lowercase = 42 class __lowerCAmelCase ( _Item ): def __init__( self ): '''simple docstring''' super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __bool__( self ): '''simple docstring''' return False UpperCamelCase : Any = _DeletedItem() class __lowerCAmelCase ( MutableMapping[KEY, VAL] ): def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ): '''simple docstring''' __UpperCamelCase = initial_block_size __UpperCamelCase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __UpperCamelCase = capacity_factor __UpperCamelCase = 0 def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return hash(__UpperCAmelCase ) % len(self._buckets ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return (ind + 1) % len(self._buckets ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self._buckets[ind] if not stored: __UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase ) self._len += 1 return True elif stored.key == key: __UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase ) return True else: return False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False __UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self._buckets __UpperCamelCase = [None] * new_size __UpperCamelCase = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def UpperCAmelCase ( self ): '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def UpperCAmelCase ( self ): '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self._get_bucket_index(__UpperCAmelCase ) for _ in range(len(self._buckets ) ): yield ind __UpperCamelCase = self._get_next_ind(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): break def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self._is_full(): self._size_up() self._add_item(__UpperCAmelCase , __UpperCAmelCase ) def __delitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): __UpperCamelCase = self._buckets[ind] if item is None: raise KeyError(__UpperCAmelCase ) if item is _deleted: continue if item.key == key: __UpperCamelCase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): __UpperCamelCase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__UpperCAmelCase ) def __len__( self ): '''simple docstring''' return self._len def __iter__( self ): '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self ): '''simple docstring''' __UpperCamelCase = ' ,'.join( F'{item.key}: {item.val}' for item in self._buckets if item ) return F'HashMap({val_string})'
316
1
"""simple docstring""" UpperCamelCase : List[str] = 9.8_06_65 def A ( snake_case :float , snake_case :float , snake_case :float = g ) -> float: if fluid_density <= 0: raise ValueError('Impossible fluid density' ) if volume < 0: raise ValueError('Impossible Object volume' ) if gravity <= 0: raise ValueError('Impossible Gravity' ) return fluid_density * gravity * volume if __name__ == "__main__": import doctest # run doctest doctest.testmod()
316
"""simple docstring""" def A ( snake_case :int , snake_case :int ) -> bool: return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
316
1
"""simple docstring""" UpperCamelCase : Any = { 0: "0", 1: "1", 2: "2", 3: "3", 4: "4", 5: "5", 6: "6", 7: "7", 8: "8", 9: "9", 1_0: "a", 1_1: "b", 1_2: "c", 1_3: "d", 1_4: "e", 1_5: "f", } def A ( snake_case :float ) -> str: assert type(snake_case ) in (int, float) and decimal == int(snake_case ) __UpperCamelCase = int(snake_case ) __UpperCamelCase = '' __UpperCamelCase = False if decimal < 0: __UpperCamelCase = True decimal *= -1 while decimal > 0: __UpperCamelCase , __UpperCamelCase = divmod(snake_case , 1_6 ) __UpperCamelCase = values[remainder] + hexadecimal __UpperCamelCase = '0x' + hexadecimal if negative: __UpperCamelCase = '-' + hexadecimal return hexadecimal if __name__ == "__main__": import doctest doctest.testmod()
316
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = 42 lowercase = 42 def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) @torch.no_grad() def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 2000 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.unet.config.sample_size __UpperCamelCase = (batch_size, 3, img_size, img_size) __UpperCamelCase = self.unet __UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma __UpperCamelCase = sample.to(self.device ) self.scheduler.set_timesteps(__UpperCAmelCase ) self.scheduler.set_sigmas(__UpperCAmelCase ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): __UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): __UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample __UpperCamelCase = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # prediction step __UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ).sample __UpperCamelCase = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ) __UpperCamelCase , __UpperCamelCase = output.prev_sample, output.prev_sample_mean __UpperCamelCase = sample_mean.clamp(0 , 1 ) __UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __UpperCamelCase = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (sample,) return ImagePipelineOutput(images=__UpperCAmelCase )
316
1
"""simple docstring""" import qiskit def A ( snake_case :int = 2 ) -> qiskit.result.counts.Counts: __UpperCamelCase = qubits # Using Aer's simulator __UpperCamelCase = qiskit.Aer.get_backend('aer_simulator' ) # Creating a Quantum Circuit acting on the q register __UpperCamelCase = qiskit.QuantumCircuit(snake_case , snake_case ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1 , snake_case ): # Adding CX (CNOT) gate circuit.cx(i - 1 , snake_case ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(snake_case ) ) , list(range(snake_case ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator __UpperCamelCase = qiskit.execute(snake_case , snake_case , shots=1_0_0_0 ) return job.result().get_counts(snake_case ) if __name__ == "__main__": print(f'''Total count for various states are: {quantum_entanglement(3)}''')
316
"""simple docstring""" def A ( snake_case :list[int] , snake_case :int ) -> bool: __UpperCamelCase = len(snake_case ) __UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): __UpperCamelCase = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): __UpperCamelCase = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: __UpperCamelCase = subset[i - 1][j] if arr[i - 1] <= j: __UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
316
1
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging UpperCamelCase : Tuple = logging.get_logger(__name__) UpperCamelCase : Tuple = "▁" UpperCamelCase : List[Any] = { "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", "tokenizer_config_file": "tokenizer_config.json", } UpperCamelCase : Optional[Any] = { "vocab_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json", }, "spm_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model", }, "tokenizer_config_file": { "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json", "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json", }, } UpperCamelCase : Union[str, Any] = { "facebook/m2m100_418M": 1_0_2_4, } # fmt: off UpperCamelCase : Dict = { "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"], "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"] } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = ["input_ids", "attention_mask"] lowercase = [] lowercase = [] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="m2m100" , __UpperCAmelCase = None , __UpperCAmelCase=8 , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs __UpperCamelCase = language_codes __UpperCamelCase = FAIRSEQ_LANGUAGE_CODES[language_codes] __UpperCamelCase = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code} __UpperCamelCase = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(__UpperCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(__UpperCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , language_codes=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = vocab_file __UpperCamelCase = load_json(__UpperCAmelCase ) __UpperCamelCase = {v: k for k, v in self.encoder.items()} __UpperCamelCase = spm_file __UpperCamelCase = load_spm(__UpperCAmelCase , self.sp_model_kwargs ) __UpperCamelCase = len(self.encoder ) __UpperCamelCase = { self.get_lang_token(__UpperCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__UpperCAmelCase ) } __UpperCamelCase = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__UpperCAmelCase )} __UpperCamelCase = {v: k for k, v in self.lang_token_to_id.items()} __UpperCamelCase = src_lang if src_lang is not None else 'en' __UpperCamelCase = tgt_lang __UpperCamelCase = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) __UpperCamelCase = num_madeup_words @property def UpperCAmelCase ( self ): '''simple docstring''' return len(self.encoder ) + len(self.lang_token_to_id ) @property def UpperCAmelCase ( self ): '''simple docstring''' return self._src_lang @src_lang.setter def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__UpperCAmelCase , self.encoder[self.unk_token] ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(__UpperCAmelCase , self.unk_token ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = [] __UpperCamelCase = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__UpperCAmelCase ) + token __UpperCamelCase = [] else: current_sub_tokens.append(__UpperCAmelCase ) out_string += self.sp_model.decode(__UpperCAmelCase ) return out_string.strip() def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) __UpperCamelCase = [1] * len(self.prefix_tokens ) __UpperCamelCase = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): '''simple docstring''' __UpperCamelCase = self.__dict__.copy() __UpperCamelCase = None return state def __setstate__( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __UpperCamelCase = {} __UpperCamelCase = load_spm(self.spm_file , self.sp_model_kwargs ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase = Path(__UpperCAmelCase ) if not save_dir.is_dir(): raise OSError(F'{save_directory} should be a directory' ) __UpperCamelCase = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) __UpperCamelCase = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder , __UpperCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __UpperCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(__UpperCAmelCase , 'wb' ) as fi: __UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (str(__UpperCAmelCase ), str(__UpperCAmelCase )) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "en" , __UpperCAmelCase = None , __UpperCAmelCase = "ro" , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = src_lang __UpperCamelCase = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) __UpperCamelCase = src_lang __UpperCamelCase = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , **__UpperCAmelCase ) __UpperCamelCase = self.get_lang_id(__UpperCAmelCase ) __UpperCamelCase = tgt_lang_id return inputs def UpperCAmelCase ( self ): '''simple docstring''' self.set_src_lang_special_tokens(self.src_lang ) def UpperCAmelCase ( self ): '''simple docstring''' self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.get_lang_token(__UpperCAmelCase ) __UpperCamelCase = self.lang_token_to_id[lang_token] __UpperCamelCase = [self.cur_lang_id] __UpperCamelCase = [self.eos_token_id] def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.get_lang_token(__UpperCAmelCase ) __UpperCamelCase = self.lang_token_to_id[lang_token] __UpperCamelCase = [self.cur_lang_id] __UpperCamelCase = [self.eos_token_id] def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.lang_code_to_token[lang] def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.get_lang_token(__UpperCAmelCase ) return self.lang_token_to_id[lang_token] def A ( snake_case :str , snake_case :Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: __UpperCamelCase = sentencepiece.SentencePieceProcessor(**snake_case ) spm.Load(str(snake_case ) ) return spm def A ( snake_case :str ) -> Union[Dict, List]: with open(snake_case , 'r' ) as f: return json.load(snake_case ) def A ( snake_case :Union[str, Any] , snake_case :str ) -> None: with open(snake_case , 'w' ) as f: json.dump(snake_case , snake_case , indent=2 )
316
"""simple docstring""" import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCamelCase : Union[str, Any] = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") UpperCamelCase : int = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeqaSeqLM, "translation": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCamelCase : Optional[Any] = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCamelCase : str = sorted(arg_to_scheduler.keys()) UpperCamelCase : List[str] = "{" + ", ".join(arg_to_scheduler_choices) + "}" class __lowerCAmelCase ( pl.LightningModule ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(__UpperCAmelCase ) __UpperCamelCase = 0 __UpperCamelCase = Path(self.hparams.output_dir ) __UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __UpperCamelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , ) else: __UpperCamelCase = config __UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ): assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) ) if tokenizer is None: __UpperCamelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , ) else: __UpperCamelCase = tokenizer __UpperCamelCase = MODEL_MODES[mode] if model is None: __UpperCamelCase = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , ) else: __UpperCamelCase = model def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler] __UpperCamelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model __UpperCamelCase = ['bias', 'LayerNorm.weight'] __UpperCamelCase = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __UpperCamelCase = Adafactor( __UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase ) else: __UpperCamelCase = AdamW( __UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __UpperCamelCase = optimizer __UpperCamelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return self.validation_step(__UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.validation_end(__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' if stage == "test": __UpperCamelCase = len(self.test_dataloader().dataset ) else: __UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase ) __UpperCamelCase = len(self.train_dataloader().dataset ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' raise NotImplementedError('You must implement this for your task' ) def UpperCAmelCase ( self ): '''simple docstring''' return self.train_loader def UpperCAmelCase ( self ): '''simple docstring''' return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( __UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.output_dir.joinpath('best_tfmr' ) __UpperCamelCase = self.step_count self.model.save_pretrained(__UpperCAmelCase ) self.tokenizer.save_pretrained(__UpperCAmelCase ) @staticmethod def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' parser.add_argument( '--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase ) parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase ) parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase ) parser.add_argument('--adafactor' , action='store_true' ) class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(__UpperCAmelCase ) class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = trainer.lr_schedulers[0]['scheduler'] __UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' rank_zero_info('***** Validation results *****' ) __UpperCamelCase = trainer.callback_metrics # Log results for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' rank_zero_info('***** Test results *****' ) __UpperCamelCase = trainer.callback_metrics # Log and save results to file __UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(__UpperCAmelCase , 'w' ) as writer: for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) def A ( snake_case :Any , snake_case :int ) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '--output_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'model_checkpoints' ) , type=snake_case , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=snake_case , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=snake_case ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=snake_case , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=snake_case , default=4_2 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'dummy-train-data' ) , type=snake_case , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , ) def A ( snake_case :BaseTransformer , snake_case :argparse.Namespace , snake_case :Union[str, Any]=None , snake_case :Union[str, Any]=True , snake_case :Any=[] , snake_case :Tuple=None , snake_case :List[str]=None , **snake_case :Union[str, Any] , ) -> Optional[int]: pl.seed_everything(args.seed ) # init model __UpperCamelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=snake_case ) # add custom checkpoints if checkpoint_callback is None: __UpperCamelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(snake_case ) if logging_callback is None: __UpperCamelCase = LoggingCallback() __UpperCamelCase = {} if args.fpaa: __UpperCamelCase = 1_6 if args.gpus > 1: __UpperCamelCase = 'auto' __UpperCamelCase = 'ddp' __UpperCamelCase = args.accumulate_grad_batches __UpperCamelCase = None __UpperCamelCase = 'auto' __UpperCamelCase = pl.Trainer.from_argparse_args( snake_case , weights_summary=snake_case , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=snake_case , val_check_interval=1 , num_sanity_val_steps=2 , **snake_case , ) if args.do_train: trainer.fit(snake_case ) else: print('RAG modeling tests with new set functions successfuly executed!' ) return trainer
316
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : int = { "configuration_xmod": [ "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP", "XmodConfig", "XmodOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Union[str, Any] = [ "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST", "XmodForCausalLM", "XmodForMaskedLM", "XmodForMultipleChoice", "XmodForQuestionAnswering", "XmodForSequenceClassification", "XmodForTokenClassification", "XmodModel", "XmodPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
316
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase : Dict = { "vocab_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", }, "merges_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", }, "tokenizer_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json", }, } UpperCamelCase : Dict = { "gpt2": 1_0_2_4, "gpt2-medium": 1_0_2_4, "gpt2-large": 1_0_2_4, "gpt2-xl": 1_0_2_4, "distilgpt2": 1_0_2_4, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ["input_ids", "attention_mask"] lowercase = GPTaTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase ) __UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space: __UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) ) __UpperCamelCase = add_prefix_space __UpperCamelCase = pre_tok_class(**__UpperCAmelCase ) __UpperCamelCase = add_prefix_space def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] ) if len(__UpperCAmelCase ) > self.model_max_length: __UpperCamelCase = input_ids[-self.model_max_length :] return input_ids
316
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Dict = { "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json", # See all PEGASUS models at https://huggingface.co/models?filter=pegasus } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "pegasus" lowercase = ["past_key_values"] lowercase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=0 , __UpperCAmelCase=False , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=1 , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = vocab_size __UpperCamelCase = max_position_embeddings __UpperCamelCase = d_model __UpperCamelCase = encoder_ffn_dim __UpperCamelCase = encoder_layers __UpperCamelCase = encoder_attention_heads __UpperCamelCase = decoder_ffn_dim __UpperCamelCase = decoder_layers __UpperCamelCase = decoder_attention_heads __UpperCamelCase = dropout __UpperCamelCase = attention_dropout __UpperCamelCase = activation_dropout __UpperCamelCase = activation_function __UpperCamelCase = init_std __UpperCamelCase = encoder_layerdrop __UpperCamelCase = decoder_layerdrop __UpperCamelCase = use_cache __UpperCamelCase = encoder_layers __UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) @property def UpperCAmelCase ( self ): '''simple docstring''' return self.encoder_attention_heads @property def UpperCAmelCase ( self ): '''simple docstring''' return self.d_model
316
"""simple docstring""" import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def A ( snake_case :str , snake_case :tuple , snake_case :Path , snake_case :Dict , snake_case :int , snake_case :List[str] , snake_case :Union[str, Any] , snake_case :Union[str, Any]=False , ) -> str: output_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , use_external_data_format=snake_case , enable_onnx_checker=snake_case , opset_version=snake_case , ) else: export( snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , opset_version=snake_case , ) @torch.no_grad() def A ( snake_case :str , snake_case :str , snake_case :int , snake_case :bool = False ) -> List[str]: __UpperCamelCase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): __UpperCamelCase = 'cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: __UpperCamelCase = 'cpu' __UpperCamelCase = Path(snake_case ) # VAE DECODER __UpperCamelCase = AutoencoderKL.from_pretrained(model_path + '/vae' ) __UpperCamelCase = vae_decoder.config.latent_channels # forward only through the decoder part __UpperCamelCase = vae_decoder.decode onnx_export( snake_case , model_args=( torch.randn(1 , snake_case , 2_5 , 2_5 ).to(device=snake_case , dtype=snake_case ), False, ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=snake_case , ) del vae_decoder if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=1_4, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") UpperCamelCase : List[Any] = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
316
1
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split() UpperCamelCase : Tuple = "|".join(sys.argv[1:]) UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''') UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
316
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A ( snake_case :list[int] , snake_case :tuple[int, ...] ) -> str | None: __UpperCamelCase = "" __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 for keychar, cipherchar in zip(cycle(snake_case ) , snake_case ): __UpperCamelCase = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case ) return decoded def A ( snake_case :list[int] ) -> list[str]: __UpperCamelCase = [] for key in product(snake_case , repeat=3 ): __UpperCamelCase = try_key(snake_case , snake_case ) if encoded is not None: possibles.append(snake_case ) return possibles def A ( snake_case :list[str] , snake_case :str ) -> list[str]: return [possible for possible in possibles if common_word in possible.lower()] def A ( snake_case :str = "p059_cipher.txt" ) -> int: __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = Path(snake_case ).parent.joinpath(snake_case ).read_text(encoding='utf-8' ) __UpperCamelCase = [int(snake_case ) for number in data.strip().split(',' )] __UpperCamelCase = filter_valid_chars(snake_case ) for common_word in COMMON_WORDS: __UpperCamelCase = filter_common_word(snake_case , snake_case ) if len(snake_case ) == 1: break __UpperCamelCase = possibles[0] return sum(ord(snake_case ) for char in decoded_text ) if __name__ == "__main__": print(f'''{solution() = }''')
316
1
"""simple docstring""" import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "" lowercase = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) lowercase = None # compression type in fsspec. ex: "gzip" lowercase = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self , __UpperCAmelCase = "" , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase ): '''simple docstring''' super().__init__(self , **__UpperCAmelCase ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode __UpperCamelCase = fsspec.open( __UpperCAmelCase , mode='rb' , protocol=__UpperCAmelCase , compression=self.compression , client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) __UpperCamelCase = os.path.basename(self.file.path.split('::' )[0] ) __UpperCamelCase = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) __UpperCamelCase = None @classmethod def UpperCAmelCase ( cls , __UpperCAmelCase ): '''simple docstring''' return super()._strip_protocol(__UpperCAmelCase ).lstrip('/' ) def UpperCAmelCase ( self ): '''simple docstring''' if self.dir_cache is None: __UpperCamelCase = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} __UpperCamelCase = {f['name']: f} def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.file.open().read() def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "rb" , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self._strip_protocol(__UpperCAmelCase ) if mode != "rb": raise ValueError(F'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' ) return self.file.open() class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "bz2" lowercase = "bz2" lowercase = ".bz2" class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "gzip" lowercase = "gzip" lowercase = ".gz" class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "lz4" lowercase = "lz4" lowercase = ".lz4" class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "xz" lowercase = "xz" lowercase = ".xz" class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "zstd" lowercase = "zstd" lowercase = ".zst" def __init__( self , __UpperCAmelCase , __UpperCAmelCase = "rb" , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = DEFAULT_BLOCK_SIZE , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( fo=__UpperCAmelCase , mode=__UpperCAmelCase , target_protocol=__UpperCAmelCase , target_options=__UpperCAmelCase , block_size=__UpperCAmelCase , **__UpperCAmelCase , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 __UpperCamelCase = self.file.__enter__ class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = file_ def __enter__( self ): '''simple docstring''' self._file.__enter__() return self def __exit__( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' self._file.__exit__(*__UpperCAmelCase , **__UpperCAmelCase ) def __iter__( self ): '''simple docstring''' return iter(self._file ) def UpperCAmelCase ( self ): '''simple docstring''' return next(self._file ) def __getattr__( self , __UpperCAmelCase ): '''simple docstring''' return getattr(self._file , __UpperCAmelCase ) def fixed_enter(*__UpperCAmelCase , **__UpperCAmelCase ): return WrappedFile(_enter(*__UpperCAmelCase , **__UpperCAmelCase ) ) __UpperCamelCase = fixed_enter
316
"""simple docstring""" UpperCamelCase : dict[str, float] = { "km/h": 1.0, "m/s": 3.6, "mph": 1.60_93_44, "knot": 1.8_52, } UpperCamelCase : dict[str, float] = { "km/h": 1.0, "m/s": 0.2_77_77_77_78, "mph": 0.6_21_37_11_92, "knot": 0.5_39_95_68_03, } def A ( snake_case :float , snake_case :str , snake_case :str ) -> float: if unit_to not in speed_chart or unit_from not in speed_chart_inverse: __UpperCamelCase = ( f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n' f'Valid values are: {", ".join(snake_case )}' ) raise ValueError(snake_case ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
316
1
"""simple docstring""" import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available UpperCamelCase : Dict = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : lowercase = 42 lowercase = 42 lowercase = 42 @dataclass class __lowerCAmelCase : lowercase = 42 lowercase = 42 lowercase = None lowercase = None class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "train" lowercase = "dev" lowercase = "test" class __lowerCAmelCase : @staticmethod def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' raise NotImplementedError @staticmethod def UpperCAmelCase ( __UpperCAmelCase ): '''simple docstring''' raise NotImplementedError @staticmethod def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase=1 , __UpperCAmelCase="[SEP]" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0 , __UpperCAmelCase=0 , __UpperCAmelCase=-100 , __UpperCAmelCase=0 , __UpperCAmelCase=True , ): '''simple docstring''' __UpperCamelCase = {label: i for i, label in enumerate(__UpperCAmelCase )} __UpperCamelCase = [] for ex_index, example in enumerate(__UpperCAmelCase ): if ex_index % 1_0000 == 0: logger.info('Writing example %d of %d' , __UpperCAmelCase , len(__UpperCAmelCase ) ) __UpperCamelCase = [] __UpperCamelCase = [] for word, label in zip(example.words , example.labels ): __UpperCamelCase = tokenizer.tokenize(__UpperCAmelCase ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(__UpperCAmelCase ) > 0: tokens.extend(__UpperCAmelCase ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__UpperCAmelCase ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. __UpperCamelCase = tokenizer.num_special_tokens_to_add() if len(__UpperCAmelCase ) > max_seq_length - special_tokens_count: __UpperCamelCase = tokens[: (max_seq_length - special_tokens_count)] __UpperCamelCase = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] __UpperCamelCase = [sequence_a_segment_id] * len(__UpperCAmelCase ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: __UpperCamelCase = [cls_token] + tokens __UpperCamelCase = [pad_token_label_id] + label_ids __UpperCamelCase = [cls_token_segment_id] + segment_ids __UpperCamelCase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. __UpperCamelCase = [1 if mask_padding_with_zero else 0] * len(__UpperCAmelCase ) # Zero-pad up to the sequence length. __UpperCamelCase = max_seq_length - len(__UpperCAmelCase ) if pad_on_left: __UpperCamelCase = ([pad_token] * padding_length) + input_ids __UpperCamelCase = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask __UpperCamelCase = ([pad_token_segment_id] * padding_length) + segment_ids __UpperCamelCase = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(__UpperCAmelCase ) == max_seq_length assert len(__UpperCAmelCase ) == max_seq_length assert len(__UpperCAmelCase ) == max_seq_length assert len(__UpperCAmelCase ) == max_seq_length if ex_index < 5: logger.info('*** Example ***' ) logger.info('guid: %s' , example.guid ) logger.info('tokens: %s' , ' '.join([str(__UpperCAmelCase ) for x in tokens] ) ) logger.info('input_ids: %s' , ' '.join([str(__UpperCAmelCase ) for x in input_ids] ) ) logger.info('input_mask: %s' , ' '.join([str(__UpperCAmelCase ) for x in input_mask] ) ) logger.info('segment_ids: %s' , ' '.join([str(__UpperCAmelCase ) for x in segment_ids] ) ) logger.info('label_ids: %s' , ' '.join([str(__UpperCAmelCase ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: __UpperCamelCase = None features.append( InputFeatures( input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , label_ids=__UpperCAmelCase ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = 42 lowercase = nn.CrossEntropyLoss().ignore_index def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase=False , __UpperCAmelCase = Split.train , ): '''simple docstring''' __UpperCamelCase = os.path.join( __UpperCAmelCase , 'cached_{}_{}_{}'.format(mode.value , tokenizer.__class__.__name__ , str(__UpperCAmelCase ) ) , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __UpperCamelCase = cached_features_file + '.lock' with FileLock(__UpperCAmelCase ): if os.path.exists(__UpperCAmelCase ) and not overwrite_cache: logger.info(F'Loading features from cached file {cached_features_file}' ) __UpperCamelCase = torch.load(__UpperCAmelCase ) else: logger.info(F'Creating features from dataset file at {data_dir}' ) __UpperCamelCase = token_classification_task.read_examples_from_file(__UpperCAmelCase , __UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers __UpperCamelCase = token_classification_task.convert_examples_to_features( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info(F'Saving features into cached file {cached_features_file}' ) torch.save(self.features , __UpperCAmelCase ) def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' return self.features[i] if is_tf_available(): import tensorflow as tf class __lowerCAmelCase : lowercase = 42 lowercase = -100 def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase=False , __UpperCAmelCase = Split.train , ): '''simple docstring''' __UpperCamelCase = token_classification_task.read_examples_from_file(__UpperCAmelCase , __UpperCAmelCase ) # TODO clean up all this to leverage built-in features of tokenizers __UpperCamelCase = token_classification_task.convert_examples_to_features( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , cls_token_at_end=bool(model_type in ['xlnet'] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['xlnet'] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=__UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == 'left' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: __UpperCamelCase = tf.data.Dataset.from_generator( __UpperCAmelCase , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) , ( {'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )}, tf.TensorShape([None] ), ) , ) else: __UpperCamelCase = tf.data.Dataset.from_generator( __UpperCAmelCase , ({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) , ( { 'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] ), 'token_type_ids': tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) , ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self ): '''simple docstring''' return len(self.features ) def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' return self.features[i]
316
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = IFInpaintingSuperResolutionPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} ) lowercase = PipelineTesterMixin.required_optional_params - {"latents"} def UpperCAmelCase ( self ): '''simple docstring''' return self._get_superresolution_dummy_components() def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' if str(__UpperCAmelCase ).startswith('mps' ): __UpperCamelCase = torch.manual_seed(__UpperCAmelCase ) else: __UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __UpperCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) __UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) __UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) __UpperCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def UpperCAmelCase ( self ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_save_load_local() def UpperCAmelCase ( self ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
316
1
"""simple docstring""" from __future__ import annotations def A ( snake_case :str , snake_case :list[str] | None = None , snake_case :dict[str, float] | None = None , snake_case :bool = False , ) -> tuple[int, float, str]: __UpperCamelCase = cipher_alphabet or [chr(snake_case ) for i in range(9_7 , 1_2_3 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) __UpperCamelCase = { 'a': 0.08_497, 'b': 0.01_492, 'c': 0.02_202, 'd': 0.04_253, 'e': 0.11_162, 'f': 0.02_228, 'g': 0.02_015, 'h': 0.06_094, 'i': 0.07_546, 'j': 0.00_153, 'k': 0.01_292, 'l': 0.04_025, 'm': 0.02_406, 'n': 0.06_749, 'o': 0.07_507, 'p': 0.01_929, 'q': 0.00_095, 'r': 0.07_587, 's': 0.06_327, 't': 0.09_356, 'u': 0.02_758, 'v': 0.00_978, 'w': 0.02_560, 'x': 0.00_150, 'y': 0.01_994, 'z': 0.00_077, } else: # Custom frequencies dictionary __UpperCamelCase = frequencies_dict if not case_sensitive: __UpperCamelCase = ciphertext.lower() # Chi squared statistic values __UpperCamelCase = {} # cycle through all of the shifts for shift in range(len(snake_case ) ): __UpperCamelCase = '' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet __UpperCamelCase = (alphabet_letters.index(letter.lower() ) - shift) % len( snake_case ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter __UpperCamelCase = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: __UpperCamelCase = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message __UpperCamelCase = decrypted_with_shift.lower().count(snake_case ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __UpperCamelCase = frequencies[letter] * occurrences # Complete the chi squared statistic formula __UpperCamelCase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message __UpperCamelCase = decrypted_with_shift.count(snake_case ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __UpperCamelCase = frequencies[letter] * occurrences # Complete the chi squared statistic formula __UpperCamelCase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary __UpperCamelCase = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(snake_case :int ) -> tuple[float, str]: return chi_squared_statistic_values[key] __UpperCamelCase = min( snake_case , key=snake_case , ) # Get all the data from the most likely cipher (key, decoded message) ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
316
"""simple docstring""" def A ( snake_case :int ) -> int: __UpperCamelCase = [1] __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0 __UpperCamelCase = ugly_nums[ia] * 2 __UpperCamelCase = ugly_nums[ia] * 3 __UpperCamelCase = ugly_nums[ia] * 5 for _ in range(1 , snake_case ): __UpperCamelCase = min(snake_case , snake_case , snake_case ) ugly_nums.append(snake_case ) if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f'''{ugly_numbers(2_0_0) = }''')
316
1
"""simple docstring""" import math UpperCamelCase : Union[str, Any] = 1_0 UpperCamelCase : List[str] = 7 UpperCamelCase : Any = BALLS_PER_COLOUR * NUM_COLOURS def A ( snake_case :int = 2_0 ) -> str: __UpperCamelCase = math.comb(snake_case , snake_case ) __UpperCamelCase = math.comb(NUM_BALLS - BALLS_PER_COLOUR , snake_case ) __UpperCamelCase = NUM_COLOURS * (1 - missing_colour / total) return f'{result:.9f}' if __name__ == "__main__": print(solution(2_0))
316
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = ["image_processor", "tokenizer"] lowercase = "OwlViTImageProcessor" lowercase = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , __UpperCAmelCase , ) __UpperCamelCase = kwargs.pop('feature_extractor' ) __UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="max_length" , __UpperCAmelCase="np" , **__UpperCAmelCase ): '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( 'You have to specify at least one text or query image or image. All three cannot be none.' ) if text is not None: if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )): __UpperCamelCase = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )] elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ): __UpperCamelCase = [] # Maximum number of queries across batch __UpperCamelCase = max([len(__UpperCAmelCase ) for t in text] ) # Pad all batch samples to max number of text queries for t in text: if len(__UpperCAmelCase ) != max_num_queries: __UpperCamelCase = t + [' '] * (max_num_queries - len(__UpperCAmelCase )) __UpperCamelCase = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) encodings.append(__UpperCAmelCase ) else: raise TypeError('Input text should be a string, a list of strings or a nested list of strings' ) if return_tensors == "np": __UpperCamelCase = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 ) __UpperCamelCase = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 ) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp __UpperCamelCase = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 ) __UpperCamelCase = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 ) elif return_tensors == "pt" and is_torch_available(): import torch __UpperCamelCase = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 ) __UpperCamelCase = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 ) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf __UpperCamelCase = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 ) __UpperCamelCase = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 ) else: raise ValueError('Target return tensor type could not be returned' ) __UpperCamelCase = BatchEncoding() __UpperCamelCase = input_ids __UpperCamelCase = attention_mask if query_images is not None: __UpperCamelCase = BatchEncoding() __UpperCamelCase = self.image_processor( __UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values __UpperCamelCase = query_pixel_values if images is not None: __UpperCamelCase = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: __UpperCamelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: __UpperCamelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def UpperCAmelCase ( self ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCAmelCase , ) return self.image_processor_class @property def UpperCAmelCase ( self ): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCAmelCase , ) return self.image_processor
316
1
"""simple docstring""" import math import sys def A ( snake_case :int ) -> int: if number != int(snake_case ): raise ValueError('the value of input must be a natural number' ) if number < 0: raise ValueError('the value of input must not be a negative number' ) if number == 0: return 1 __UpperCamelCase = [-1] * (number + 1) __UpperCamelCase = 0 for i in range(1 , number + 1 ): __UpperCamelCase = sys.maxsize __UpperCamelCase = int(math.sqrt(snake_case ) ) for j in range(1 , root + 1 ): __UpperCamelCase = 1 + answers[i - (j**2)] __UpperCamelCase = min(snake_case , snake_case ) __UpperCamelCase = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
316
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_input_mask __UpperCamelCase = use_token_type_ids __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = rotary_dim __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = initializer_range __UpperCamelCase = None __UpperCamelCase = vocab_size - 1 __UpperCamelCase = vocab_size - 1 __UpperCamelCase = vocab_size - 1 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs __UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = 20 __UpperCamelCase = model_class_name(__UpperCAmelCase ) __UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase ) __UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __UpperCamelCase = model( input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , ) __UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __UpperCamelCase = model( input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , ) __UpperCamelCase = model(__UpperCAmelCase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = 20 __UpperCamelCase = model_class_name(__UpperCAmelCase ) __UpperCamelCase = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase ) __UpperCamelCase = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __UpperCamelCase = model( input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , ) __UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __UpperCamelCase = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , ) __UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' ) @require_flax class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else () def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = FlaxGPTJModelTester(self ) def UpperCAmelCase ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) @tooslow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) __UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase ) __UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) __UpperCamelCase = False __UpperCamelCase = model.config.eos_token_id __UpperCamelCase = jax.jit(model.generate ) __UpperCamelCase = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences __UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) __UpperCamelCase = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @is_pt_flax_cross_test def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape __UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__UpperCAmelCase ): __UpperCamelCase = 0 __UpperCamelCase = 1 __UpperCamelCase = 0 __UpperCamelCase = 1 __UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval() __UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa ) __UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase ) __UpperCamelCase = fx_state with torch.no_grad(): __UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple() __UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__UpperCAmelCase ) __UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) __UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple() self.assertEqual( len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning __UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval() __UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa ) __UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params ) __UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape __UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__UpperCAmelCase ): __UpperCamelCase = 0 __UpperCamelCase = 1 __UpperCamelCase = 0 __UpperCamelCase = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): __UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple() __UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__UpperCAmelCase ) __UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase ) with torch.no_grad(): __UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple() self.assertEqual( len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def UpperCAmelCase ( self ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) __UpperCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(__UpperCAmelCase )
316
1
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = (IPNDMScheduler,) lowercase = (("num_inference_steps", 50),) def UpperCAmelCase ( self , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = {'num_train_timesteps': 1000} config.update(**__UpperCAmelCase ) return config def UpperCAmelCase ( self , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = dict(self.forward_default_kwargs ) __UpperCamelCase = kwargs.pop('num_inference_steps' , __UpperCAmelCase ) __UpperCamelCase = self.dummy_sample __UpperCamelCase = 0.1 * sample __UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: __UpperCamelCase = self.get_scheduler_config(**__UpperCAmelCase ) __UpperCamelCase = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals __UpperCamelCase = dummy_past_residuals[:] if time_step is None: __UpperCamelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCAmelCase ) __UpperCamelCase = scheduler_class.from_pretrained(__UpperCAmelCase ) new_scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals __UpperCamelCase = dummy_past_residuals[:] __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample __UpperCamelCase = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample __UpperCamelCase = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase ( self ): '''simple docstring''' pass def UpperCAmelCase ( self , __UpperCAmelCase=0 , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = dict(self.forward_default_kwargs ) __UpperCamelCase = kwargs.pop('num_inference_steps' , __UpperCAmelCase ) __UpperCamelCase = self.dummy_sample __UpperCamelCase = 0.1 * sample __UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: __UpperCamelCase = self.get_scheduler_config() __UpperCamelCase = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) __UpperCamelCase = dummy_past_residuals[:] if time_step is None: __UpperCamelCase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCAmelCase ) __UpperCamelCase = scheduler_class.from_pretrained(__UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) __UpperCamelCase = dummy_past_residuals[:] __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample __UpperCamelCase = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample __UpperCamelCase = new_scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase ( self , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.scheduler_classes[0] __UpperCamelCase = self.get_scheduler_config(**__UpperCAmelCase ) __UpperCamelCase = scheduler_class(**__UpperCAmelCase ) __UpperCamelCase = 10 __UpperCamelCase = self.dummy_model() __UpperCamelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): __UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.timesteps ): __UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ) __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ).prev_sample return sample def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = dict(self.forward_default_kwargs ) __UpperCamelCase = kwargs.pop('num_inference_steps' , __UpperCAmelCase ) for scheduler_class in self.scheduler_classes: __UpperCamelCase = self.get_scheduler_config() __UpperCamelCase = scheduler_class(**__UpperCAmelCase ) __UpperCamelCase = self.dummy_sample __UpperCamelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__UpperCAmelCase , 'set_timesteps' ): scheduler.set_timesteps(__UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(__UpperCAmelCase , 'set_timesteps' ): __UpperCamelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __UpperCamelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] __UpperCamelCase = dummy_past_residuals[:] __UpperCamelCase = scheduler.timesteps[5] __UpperCamelCase = scheduler.timesteps[6] __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample __UpperCamelCase = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase ( self ): '''simple docstring''' for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase , time_step=__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=__UpperCAmelCase , time_step=__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.full_loop() __UpperCamelCase = torch.mean(torch.abs(__UpperCAmelCase ) ) assert abs(result_mean.item() - 254_0529 ) < 10
316
"""simple docstring""" def A ( snake_case :list[int] , snake_case :list[int] ) -> None: __UpperCamelCase = len(snake_case ) print('The following activities are selected:' ) # The first activity is always selected __UpperCamelCase = 0 print(snake_case , end=',' ) # Consider rest of the activities for j in range(snake_case ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(snake_case , end=',' ) __UpperCamelCase = j if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase : int = [1, 3, 0, 5, 8, 5] UpperCamelCase : str = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
316
1
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCamelCase : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" UpperCamelCase : str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" UpperCamelCase : List[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def UpperCAmelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=__UpperCAmelCase , hypotheses=__UpperCAmelCase , min_len=__UpperCAmelCase , max_len=__UpperCAmelCase ) }
316
"""simple docstring""" def A ( snake_case :int ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('The given input must be positive' ) # get the generated string sequence __UpperCamelCase = gray_code_sequence_string(snake_case ) # # convert them to integers for i in range(len(snake_case ) ): __UpperCamelCase = int(sequence[i] , 2 ) return sequence def A ( snake_case :int ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __UpperCamelCase = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __UpperCamelCase = gray_code_sequence_string(bit_count - 1 ) __UpperCamelCase = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __UpperCamelCase = '0' + smaller_sequence[i] sequence.append(snake_case ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __UpperCamelCase = '1' + smaller_sequence[i] sequence.append(snake_case ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
316
1
"""simple docstring""" import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def A ( snake_case :Optional[int] , snake_case :int ) -> int: if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer __UpperCamelCase = flax_key_tuple[:-1] + ('weight',) __UpperCamelCase = torch.permute(snake_case , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case ): # linear layer __UpperCamelCase = flax_key_tuple[:-1] + ('weight',) __UpperCamelCase = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: __UpperCamelCase = flax_key_tuple[:-1] + ('weight',) return flax_key_tuple, flax_tensor def A ( snake_case :List[str] , snake_case :List[str] , snake_case :Union[str, Any] ) -> Tuple: if "metadata" in layer: __UpperCamelCase = layer.split('metadata' ) __UpperCamelCase = ''.join(split_layer[0] )[:-1] __UpperCamelCase = [tuple(('metadata' + split_layer[1]).split('/' ) )] elif "kvstore" in layer: __UpperCamelCase = layer.split('kvstore' ) __UpperCamelCase = ''.join(split_layer[0] )[:-1] __UpperCamelCase = [tuple(('kvstore' + split_layer[1]).split('/' ) )] else: __UpperCamelCase = layer.split('/' ) __UpperCamelCase = '/'.join(split_layer[:-1] ) __UpperCamelCase = (split_layer[-1],) if "kvstore/path" in layer: __UpperCamelCase = f'{switch_checkpoint_path}/{checkpoint_info[layer]}' elif "kvstore/driver" in layer: __UpperCamelCase = 'file' else: __UpperCamelCase = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def A ( snake_case :List[str] , snake_case :str ) -> List[str]: __UpperCamelCase = rename_keys(snake_case ) __UpperCamelCase = {} for k, v in current_block.items(): __UpperCamelCase = v __UpperCamelCase = new_current_block torch.save(snake_case , snake_case ) def A ( snake_case :Optional[int] , snake_case :Tuple , snake_case :Optional[Any] , snake_case :Dict , snake_case :str = WEIGHTS_NAME ) -> str: __UpperCamelCase = convert_file_size_to_int(snake_case ) __UpperCamelCase = [] __UpperCamelCase = {} __UpperCamelCase = 0 __UpperCamelCase = 0 os.makedirs(snake_case , exist_ok=snake_case ) with gfile.GFile(switch_checkpoint_path + '/checkpoint' , 'rb' ) as fp: __UpperCamelCase = serialization.msgpack_restore(fp.read() )['optimizer']['target'] __UpperCamelCase = flatten_dict(snake_case , sep='/' ) __UpperCamelCase = {} for layer in checkpoint_info.keys(): __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = get_key_and_tensorstore_dict( snake_case , snake_case , snake_case ) if curr_real_layer_name in all_layers: __UpperCamelCase = content else: __UpperCamelCase = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file __UpperCamelCase = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() __UpperCamelCase = torch.tensor(snake_case ) __UpperCamelCase = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts __UpperCamelCase , __UpperCamelCase = rename_base_flax_keys(tuple(key.split('/' ) ) , snake_case ) __UpperCamelCase = '/'.join(snake_case ) # If this weight is going to tip up over the maximal size, we split. if current_block_size + weight_size > max_shard_size: __UpperCamelCase = os.path.join( snake_case , weights_name.replace('.bin' , f'-{len(snake_case )+1:05d}-of-???.bin' ) ) rename_and_save_block(snake_case , snake_case ) sharded_state_dicts.append(current_block.keys() ) del current_block __UpperCamelCase = {} __UpperCamelCase = 0 __UpperCamelCase = raw_weights.to(getattr(snake_case , snake_case ) ) current_block_size += weight_size total_size += weight_size # Add the last block __UpperCamelCase = os.path.join(snake_case , weights_name.replace('.bin' , f'-{len(snake_case )+1:05d}-of-???.bin' ) ) rename_and_save_block(snake_case , snake_case ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(snake_case ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index __UpperCamelCase = {} __UpperCamelCase = {} for idx, shard in enumerate(snake_case ): __UpperCamelCase = weights_name.replace( '.bin' , f'-{idx+1:05d}-of-{len(snake_case ):05d}.bin' ) # len(sharded_state_dicts):05d} __UpperCamelCase = os.path.join(snake_case , weights_name.replace('.bin' , f'-{idx+1:05d}-of-???.bin' ) ) os.rename(snake_case , os.path.join(snake_case , snake_case ) ) __UpperCamelCase = shard for key in shard: __UpperCamelCase = shard_file # Add the metadata __UpperCamelCase = {'total_size': total_size} __UpperCamelCase = {'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(snake_case , snake_case ) , 'w' , encoding='utf-8' ) as f: __UpperCamelCase = json.dumps(snake_case , indent=2 , sort_keys=snake_case ) + '\n' f.write(snake_case ) return metadata, index if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) UpperCamelCase : List[Any] = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def A ( ) -> List[str]: from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer __UpperCamelCase = SwitchTransformersConfig.from_pretrained('google/switch-base-8' ) config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted' ) __UpperCamelCase = SwitchTransformersForConditionalGeneration.from_pretrained( '/home/arthur_huggingface_co/transformers/switch_converted' , device_map='auto' ) __UpperCamelCase = TaTokenizer.from_pretrained('t5-small' ) __UpperCamelCase = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.' __UpperCamelCase = tokenizer(snake_case , return_tensors='pt' ).input_ids __UpperCamelCase = model.generate(snake_case , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
316
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=[0, 1, 2, 3] , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = 100 __UpperCamelCase = batch_size __UpperCamelCase = image_size __UpperCamelCase = patch_size __UpperCamelCase = num_channels __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = type_sequence_label_size __UpperCamelCase = initializer_range __UpperCamelCase = scope __UpperCamelCase = out_indices __UpperCamelCase = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) __UpperCamelCase = (image_size // patch_size) ** 2 __UpperCamelCase = num_patches + 1 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __UpperCamelCase = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCAmelCase ( self ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = BeitModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = BeitForMaskedImageModeling(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.type_sequence_label_size __UpperCamelCase = BeitForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __UpperCamelCase = 1 __UpperCamelCase = BeitForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = BeitForSemanticSegmentation(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) __UpperCamelCase = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs __UpperCamelCase = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) lowercase = ( { "feature-extraction": BeitModel, "image-classification": BeitForImageClassification, "image-segmentation": BeitForSemanticSegmentation, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='BEiT does not use inputs_embeds' ) def UpperCAmelCase ( self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def UpperCAmelCase ( self ): '''simple docstring''' pass def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(__UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(__UpperCAmelCase ) __UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCamelCase = [*signature.parameters.keys()] __UpperCamelCase = ['pixel_values'] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' if not self.model_tester.is_training: return __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling]: continue __UpperCamelCase = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.train() __UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __UpperCamelCase = model(**__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return __UpperCamelCase = False __UpperCamelCase = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(__UpperCAmelCase ), BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue __UpperCamelCase = model_class(__UpperCAmelCase ) model.gradient_checkpointing_enable() model.to(__UpperCAmelCase ) model.train() __UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) __UpperCamelCase = model(**__UpperCAmelCase ).loss loss.backward() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = _config_zero_init(__UpperCAmelCase ) for model_class in self.all_model_classes: __UpperCamelCase = model_class(config=__UpperCAmelCase ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def UpperCAmelCase ( self ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = BeitModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def A ( ) -> int: __UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def UpperCAmelCase ( self ): '''simple docstring''' return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ).to(__UpperCAmelCase ) __UpperCamelCase = self.default_image_processor __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase ) # prepare bool_masked_pos __UpperCamelCase = torch.ones((1, 196) , dtype=torch.bool ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCamelCase = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase ) __UpperCamelCase = outputs.logits # verify the logits __UpperCamelCase = torch.Size((1, 196, 8192) ) self.assertEqual(logits.shape , __UpperCAmelCase ) __UpperCamelCase = torch.tensor( [[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1E-2 ) ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ).to(__UpperCAmelCase ) __UpperCamelCase = self.default_image_processor __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCamelCase = model(**__UpperCAmelCase ) __UpperCamelCase = outputs.logits # verify the logits __UpperCamelCase = torch.Size((1, 1000) ) self.assertEqual(logits.shape , __UpperCAmelCase ) __UpperCamelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) ) __UpperCamelCase = 281 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ).to( __UpperCAmelCase ) __UpperCamelCase = self.default_image_processor __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCamelCase = model(**__UpperCAmelCase ) __UpperCamelCase = outputs.logits # verify the logits __UpperCamelCase = torch.Size((1, 2_1841) ) self.assertEqual(logits.shape , __UpperCAmelCase ) __UpperCamelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) ) __UpperCamelCase = 2396 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) __UpperCamelCase = model.to(__UpperCAmelCase ) __UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase ) __UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) __UpperCamelCase = Image.open(ds[0]['file'] ) __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCamelCase = model(**__UpperCAmelCase ) __UpperCamelCase = outputs.logits # verify the logits __UpperCamelCase = torch.Size((1, 150, 160, 160) ) self.assertEqual(logits.shape , __UpperCAmelCase ) __UpperCamelCase = version.parse(PIL.__version__ ) < version.parse('9.0.0' ) if is_pillow_less_than_a: __UpperCamelCase = torch.tensor( [ [[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]], [[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]], [[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]], ] , device=__UpperCAmelCase , ) else: __UpperCamelCase = torch.tensor( [ [[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]], [[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]], [[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]], ] , device=__UpperCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640' ) __UpperCamelCase = model.to(__UpperCAmelCase ) __UpperCamelCase = BeitImageProcessor(do_resize=__UpperCAmelCase , size=640 , do_center_crop=__UpperCAmelCase ) __UpperCamelCase = load_dataset('hf-internal-testing/fixtures_ade20k' , split='test' ) __UpperCamelCase = Image.open(ds[0]['file'] ) __UpperCamelCase = image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): __UpperCamelCase = model(**__UpperCAmelCase ) __UpperCamelCase = outputs.logits.detach().cpu() __UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(500, 300)] ) __UpperCamelCase = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , __UpperCAmelCase ) __UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase ) __UpperCamelCase = torch.Size((160, 160) ) self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
316
1
"""simple docstring""" import numpy as np def A ( snake_case :np.array ) -> np.array: return 1 / (1 + np.exp(-vector )) def A ( snake_case :np.array ) -> np.array: return vector * sigmoid(1.702 * vector ) if __name__ == "__main__": import doctest doctest.testmod()
316
"""simple docstring""" def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int: __UpperCamelCase = range(1 , snake_case ) __UpperCamelCase = range(1 , snake_case ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'''{solution(1_0, 2_2) = }''')
316
1
"""simple docstring""" def A ( snake_case :int , snake_case :int ) -> bool: return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
316
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys UpperCamelCase : Union[str, Any] = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") UpperCamelCase : Any = subprocess.check_output(f'''git diff --name-only {fork_point_sha}'''.split()).decode("utf-8").split() UpperCamelCase : Tuple = "|".join(sys.argv[1:]) UpperCamelCase : Optional[int] = re.compile(Rf'''^({joined_dirs}).*?\.py$''') UpperCamelCase : Optional[Any] = [x for x in modified_files if regex.match(x)] print(" ".join(relevant_modified_files), end="")
316
1
"""simple docstring""" from ..utils import is_flax_available, is_torch_available if is_torch_available(): from .autoencoder_kl import AutoencoderKL from .controlnet import ControlNetModel from .dual_transformer_ad import DualTransformeraDModel from .modeling_utils import ModelMixin from .prior_transformer import PriorTransformer from .ta_film_transformer import TaFilmDecoder from .transformer_ad import TransformeraDModel from .unet_ad import UNetaDModel from .unet_ad import UNetaDModel from .unet_ad_condition import UNetaDConditionModel from .unet_ad_condition import UNetaDConditionModel from .vq_model import VQModel if is_flax_available(): from .controlnet_flax import FlaxControlNetModel from .unet_ad_condition_flax import FlaxUNetaDConditionModel from .vae_flax import FlaxAutoencoderKL
316
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCamelCase : Any = logging.get_logger(__name__) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = ["pixel_values"] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = 8 , **__UpperCAmelCase , ): '''simple docstring''' super().__init__(**__UpperCAmelCase ) __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_pad __UpperCamelCase = pad_size def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase ): '''simple docstring''' return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = get_image_size(__UpperCAmelCase ) __UpperCamelCase = (old_height // size + 1) * size - old_height __UpperCamelCase = (old_width // size + 1) * size - old_width return pad(__UpperCAmelCase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_pad if do_pad is not None else self.do_pad __UpperCamelCase = pad_size if pad_size is not None else self.pad_size __UpperCamelCase = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_pad: __UpperCamelCase = [self.pad(__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] __UpperCamelCase = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] __UpperCamelCase = {'pixel_values': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
316
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : int = logging.get_logger(__name__) UpperCamelCase : Optional[int] = { "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json", } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "transfo-xl" lowercase = ["mems"] lowercase = { "n_token": "vocab_size", "hidden_size": "d_model", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , __UpperCAmelCase=26_7735 , __UpperCAmelCase=[2_0000, 4_0000, 20_0000] , __UpperCAmelCase=1024 , __UpperCAmelCase=1024 , __UpperCAmelCase=16 , __UpperCAmelCase=64 , __UpperCAmelCase=4096 , __UpperCAmelCase=4 , __UpperCAmelCase=False , __UpperCAmelCase=18 , __UpperCAmelCase=1600 , __UpperCAmelCase=1000 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=-1 , __UpperCAmelCase=True , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase="normal" , __UpperCAmelCase=0.0_1 , __UpperCAmelCase=0.0_1 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=0 , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = vocab_size __UpperCamelCase = [] self.cutoffs.extend(__UpperCAmelCase ) if proj_share_all_but_first: __UpperCamelCase = [False] + [True] * len(self.cutoffs ) else: __UpperCamelCase = [False] + [False] * len(self.cutoffs ) __UpperCamelCase = d_model __UpperCamelCase = d_embed __UpperCamelCase = d_head __UpperCamelCase = d_inner __UpperCamelCase = div_val __UpperCamelCase = pre_lnorm __UpperCamelCase = n_layer __UpperCamelCase = n_head __UpperCamelCase = mem_len __UpperCamelCase = same_length __UpperCamelCase = attn_type __UpperCamelCase = clamp_len __UpperCamelCase = sample_softmax __UpperCamelCase = adaptive __UpperCamelCase = dropout __UpperCamelCase = dropatt __UpperCamelCase = untie_r __UpperCamelCase = init __UpperCamelCase = init_range __UpperCamelCase = proj_init_std __UpperCamelCase = init_std __UpperCamelCase = layer_norm_epsilon super().__init__(eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) @property def UpperCAmelCase ( self ): '''simple docstring''' logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' ) return -1 @max_position_embeddings.setter def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' raise NotImplementedError( F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
316
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = 13 __UpperCamelCase = 7 __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = 2 __UpperCamelCase = 99 __UpperCamelCase = 0 __UpperCamelCase = 32 __UpperCamelCase = 2 __UpperCamelCase = 4 __UpperCamelCase = 0.1 __UpperCamelCase = 0.1 __UpperCamelCase = 512 __UpperCamelCase = 16 __UpperCamelCase = 2 __UpperCamelCase = 0.0_2 __UpperCamelCase = 3 __UpperCamelCase = 4 __UpperCamelCase = 'last' __UpperCamelCase = True __UpperCamelCase = None __UpperCamelCase = 0 def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) __UpperCamelCase = None if self.use_input_lengths: __UpperCamelCase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __UpperCamelCase = None if self.use_token_type_ids: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertModel(config=__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) __UpperCamelCase = [input_ids, input_mask] __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertWithLMHeadModel(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertForQuestionAnsweringSimple(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = TFFlaubertForSequenceClassification(__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'lengths': input_lengths} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = TFFlaubertForTokenClassification(config=__UpperCAmelCase ) __UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.num_choices __UpperCamelCase = TFFlaubertForMultipleChoice(config=__UpperCAmelCase ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCamelCase = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = config_and_inputs __UpperCamelCase = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'langs': token_type_ids, 'lengths': input_lengths, } return config, inputs_dict @require_tf class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) lowercase = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowercase = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) lowercase = False lowercase = False def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFFlaubertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , emb_dim=37 ) def UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*__UpperCAmelCase ) @slow def UpperCAmelCase ( self ): '''simple docstring''' for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = TFFlaubertModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @require_tf @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' ) __UpperCamelCase = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" __UpperCamelCase = model(__UpperCAmelCase )[0] __UpperCamelCase = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , __UpperCAmelCase ) # compare the actual values for a slice. __UpperCamelCase = tf.convert_to_tensor( [ [ [-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8], [-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9], [-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
316
1
"""simple docstring""" def A ( snake_case :int = 1_0 , snake_case :int = 2_2 ) -> int: __UpperCamelCase = range(1 , snake_case ) __UpperCamelCase = range(1 , snake_case ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'''{solution(1_0, 2_2) = }''')
316
"""simple docstring""" import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def A ( snake_case :Union[str, Any] , snake_case :Any , snake_case :Union[str, Any] , snake_case :Any ) -> str: __UpperCamelCase = s.rsplit(snake_case , snake_case ) return new.join(snake_case ) def A ( snake_case :List[Any] ) -> int: # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def A ( snake_case :str ) -> Union[str, Any]: __UpperCamelCase = {} __UpperCamelCase = ['group_1', 'group_2', 'group_3', 'group_4'] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __UpperCamelCase = key.replace(f'{group_key}.' , f'{group_key}.group.' ) if "res_path" in key: __UpperCamelCase = key.replace('res_path.' , 'res_path.path.' ) if key.endswith('.w' ): __UpperCamelCase = rreplace(snake_case , '.w' , '.weight' , 1 ) if key.endswith('.b' ): __UpperCamelCase = rreplace(snake_case , '.b' , '.bias' , 1 ) __UpperCamelCase = value.float() return upgrade @torch.no_grad() def A ( snake_case :List[str] , snake_case :Tuple , snake_case :List[Any]=None , snake_case :str=True ) -> int: from dall_e import Encoder __UpperCamelCase = Encoder() if os.path.exists(snake_case ): __UpperCamelCase = torch.load(snake_case ) else: __UpperCamelCase = torch.hub.load_state_dict_from_url(snake_case ) if isinstance(snake_case , snake_case ): __UpperCamelCase = ckpt.state_dict() encoder.load_state_dict(snake_case ) if config_path is not None: __UpperCamelCase = FlavaImageCodebookConfig.from_pretrained(snake_case ) else: __UpperCamelCase = FlavaImageCodebookConfig() __UpperCamelCase = FlavaImageCodebook(snake_case ).eval() __UpperCamelCase = encoder.state_dict() __UpperCamelCase = upgrade_state_dict(snake_case ) hf_model.load_state_dict(snake_case ) __UpperCamelCase = hf_model.state_dict() __UpperCamelCase = count_parameters(snake_case ) __UpperCamelCase = count_parameters(snake_case ) assert torch.allclose(snake_case , snake_case , atol=1e-3 ) if save_checkpoint: hf_model.save_pretrained(snake_case ) else: return hf_state_dict if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") UpperCamelCase : int = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
316
1
"""simple docstring""" from typing import Any class __lowerCAmelCase : def __init__( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = data __UpperCamelCase = None def __repr__( self ): '''simple docstring''' return F'Node({self.data})' class __lowerCAmelCase : def __init__( self ): '''simple docstring''' __UpperCamelCase = None def __iter__( self ): '''simple docstring''' __UpperCamelCase = self.head while node: yield node.data __UpperCamelCase = node.next def __len__( self ): '''simple docstring''' return sum(1 for _ in self ) def __repr__( self ): '''simple docstring''' return "->".join([str(__UpperCAmelCase ) for item in self] ) def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('list index out of range.' ) __UpperCamelCase = self.head for _ in range(__UpperCAmelCase ): __UpperCamelCase = current.next __UpperCamelCase = data def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' self.insert_nth(len(self ) , __UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' self.insert_nth(0 , __UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('list index out of range' ) __UpperCamelCase = Node(__UpperCAmelCase ) if self.head is None: __UpperCamelCase = new_node elif index == 0: __UpperCamelCase = self.head # link new_node to head __UpperCamelCase = new_node else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = new_node def UpperCAmelCase ( self ): # print every node data '''simple docstring''' print(self ) def UpperCAmelCase ( self ): '''simple docstring''' return self.delete_nth(0 ) def UpperCAmelCase ( self ): # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def UpperCAmelCase ( self , __UpperCAmelCase = 0 ): '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('List index out of range.' ) __UpperCamelCase = self.head # default first node if index == 0: __UpperCamelCase = self.head.next else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = temp.next.next return delete_node.data def UpperCAmelCase ( self ): '''simple docstring''' return self.head is None def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = self.head while current: # Store the current node's next node. __UpperCamelCase = current.next # Make the current node's next point backwards __UpperCamelCase = prev # Make the previous node be the current node __UpperCamelCase = current # Make the current node the next node (to progress iteration) __UpperCamelCase = next_node # Return prev in order to put the head at the end __UpperCamelCase = prev def A ( ) -> None: __UpperCamelCase = LinkedList() assert linked_list.is_empty() is True assert str(snake_case ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(1_0 ): assert len(snake_case ) == i linked_list.insert_nth(snake_case , i + 1 ) assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 1_1 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(1_1 ) assert str(snake_case ) == "->".join(str(snake_case ) for i in range(0 , 1_2 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 1_0 assert linked_list.delete_tail() == 1_1 assert len(snake_case ) == 9 assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 1_0 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __UpperCamelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(snake_case ) == "->".join(str(snake_case ) for i in range(-8 , 1 ) ) def A ( ) -> None: __UpperCamelCase = [ -9, 1_0_0, Node(7_7_3_4_5_1_1_2 ), 'dlrow olleH', 7, 5_5_5_5, 0, -192.55_555, 'Hello, world!', 77.9, Node(1_0 ), None, None, 12.20, ] __UpperCamelCase = LinkedList() for i in test_input: linked_list.insert_tail(snake_case ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __UpperCamelCase = linked_list.delete_head() assert result == -9 assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __UpperCamelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __UpperCamelCase = linked_list.delete_nth(1_0 ) assert result is None assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('Hello again, world!' ) ) assert ( str(snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(snake_case ) assert ( str(snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(snake_case ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def A ( ) -> str: from doctest import testmod testmod() __UpperCamelCase = LinkedList() linked_list.insert_head(input('Inserting 1st at head ' ).strip() ) linked_list.insert_head(input('Inserting 2nd at head ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() ) linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() ) print('\nPrint list:' ) linked_list.print_list() print('\nDelete head' ) linked_list.delete_head() print('Delete tail' ) linked_list.delete_tail() print('\nPrint list:' ) linked_list.print_list() print('\nReverse linked list' ) linked_list.reverse() print('\nPrint list:' ) linked_list.print_list() print('\nString representation of linked list:' ) print(snake_case ) print('\nReading/changing Node data using indexing:' ) print(f'Element at Position 1: {linked_list[1]}' ) __UpperCamelCase = input('Enter New Value: ' ).strip() print('New list:' ) print(snake_case ) print(f'length of linked_list is : {len(snake_case )}' ) if __name__ == "__main__": main()
316
"""simple docstring""" import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings UpperCamelCase : str = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `max_length` value of the model configuration." ) } , ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": ( "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default " "to the `num_beams` value of the model configuration." ) } , ) lowercase = field( default=__SCREAMING_SNAKE_CASE , metadata={ "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction." } , ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = super().to_dict() for k, v in d.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): __UpperCamelCase = v.to_dict() return d
316
1
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
316
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar UpperCamelCase : List[str] = TypeVar("KEY") UpperCamelCase : List[str] = TypeVar("VAL") @dataclass(frozen=__SCREAMING_SNAKE_CASE , slots=__SCREAMING_SNAKE_CASE ) class __lowerCAmelCase ( Generic[KEY, VAL] ): lowercase = 42 lowercase = 42 class __lowerCAmelCase ( _Item ): def __init__( self ): '''simple docstring''' super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __bool__( self ): '''simple docstring''' return False UpperCamelCase : Any = _DeletedItem() class __lowerCAmelCase ( MutableMapping[KEY, VAL] ): def __init__( self , __UpperCAmelCase = 8 , __UpperCAmelCase = 0.7_5 ): '''simple docstring''' __UpperCamelCase = initial_block_size __UpperCamelCase = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 __UpperCamelCase = capacity_factor __UpperCamelCase = 0 def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return hash(__UpperCAmelCase ) % len(self._buckets ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return (ind + 1) % len(self._buckets ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self._buckets[ind] if not stored: __UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase ) self._len += 1 return True elif stored.key == key: __UpperCamelCase = _Item(__UpperCAmelCase , __UpperCAmelCase ) return True else: return False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' if len(self._buckets ) <= self._initial_block_size: return False __UpperCamelCase = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self._buckets __UpperCamelCase = [None] * new_size __UpperCamelCase = 0 for item in old_buckets: if item: self._add_item(item.key , item.val ) def UpperCAmelCase ( self ): '''simple docstring''' self._resize(len(self._buckets ) * 2 ) def UpperCAmelCase ( self ): '''simple docstring''' self._resize(len(self._buckets ) // 2 ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self._get_bucket_index(__UpperCAmelCase ) for _ in range(len(self._buckets ) ): yield ind __UpperCamelCase = self._get_next_ind(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): if self._try_set(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): break def __setitem__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self._is_full(): self._size_up() self._add_item(__UpperCAmelCase , __UpperCAmelCase ) def __delitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): __UpperCamelCase = self._buckets[ind] if item is None: raise KeyError(__UpperCAmelCase ) if item is _deleted: continue if item.key == key: __UpperCamelCase = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self , __UpperCAmelCase ): '''simple docstring''' for ind in self._iterate_buckets(__UpperCAmelCase ): __UpperCamelCase = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__UpperCAmelCase ) def __len__( self ): '''simple docstring''' return self._len def __iter__( self ): '''simple docstring''' yield from (item.key for item in self._buckets if item) def __repr__( self ): '''simple docstring''' __UpperCamelCase = ' ,'.join( F'{item.key}: {item.val}' for item in self._buckets if item ) return F'HashMap({val_string})'
316
1
"""simple docstring""" import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase : Dict = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = XLMProphetNetTokenizer lowercase = False lowercase = True def UpperCAmelCase ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __UpperCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = '[PAD]' __UpperCamelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '[PAD]' ) self.assertEqual(vocab_keys[1] , '[CLS]' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(__UpperCAmelCase ) , 1012 ) def UpperCAmelCase ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = XLMProphetNetTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) __UpperCamelCase = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) __UpperCamelCase = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) __UpperCamelCase = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '[UNK]', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '[UNK]', '.', ] , ) @cached_property def UpperCAmelCase ( self ): '''simple docstring''' return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = 'Hello World!' __UpperCamelCase = [3_5389, 6672, 49, 2] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
316
"""simple docstring""" def A ( snake_case :int , snake_case :int ) -> bool: return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
316
1
"""simple docstring""" def A ( snake_case :float , snake_case :list[float] ) -> float: if discount_rate < 0: raise ValueError('Discount rate cannot be negative' ) if not cash_flows: raise ValueError('Cash flows list cannot be empty' ) __UpperCamelCase = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(snake_case ) ) return round(snake_case , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
316
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = 42 lowercase = 42 def __init__( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' super().__init__() self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase ) @torch.no_grad() def __call__( self , __UpperCAmelCase = 1 , __UpperCAmelCase = 2000 , __UpperCAmelCase = None , __UpperCAmelCase = "pil" , __UpperCAmelCase = True , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = self.unet.config.sample_size __UpperCamelCase = (batch_size, 3, img_size, img_size) __UpperCamelCase = self.unet __UpperCamelCase = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase ) * self.scheduler.init_noise_sigma __UpperCamelCase = sample.to(self.device ) self.scheduler.set_timesteps(__UpperCAmelCase ) self.scheduler.set_sigmas(__UpperCAmelCase ) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): __UpperCamelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device ) # correction step for _ in range(self.scheduler.config.correct_steps ): __UpperCamelCase = self.unet(__UpperCAmelCase , __UpperCAmelCase ).sample __UpperCamelCase = self.scheduler.step_correct(__UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample # prediction step __UpperCamelCase = model(__UpperCAmelCase , __UpperCAmelCase ).sample __UpperCamelCase = self.scheduler.step_pred(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ) __UpperCamelCase , __UpperCamelCase = output.prev_sample, output.prev_sample_mean __UpperCamelCase = sample_mean.clamp(0 , 1 ) __UpperCamelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": __UpperCamelCase = self.numpy_to_pil(__UpperCAmelCase ) if not return_dict: return (sample,) return ImagePipelineOutput(images=__UpperCAmelCase )
316
1
"""simple docstring""" import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) UpperCamelCase : Tuple = { "sample_size": 3_2, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": 1_0_0_0, "block_out_channels": [3_2, 6_4], "attention_head_dim": 8, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } UpperCamelCase : List[Any] = { "sample_size": 6_4, "in_channels": 3, "out_channels": 3, "layers_per_block": 3, "num_class_embeds": 1_0_0_0, "block_out_channels": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4], "attention_head_dim": 6_4, "down_block_types": [ "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "scale_shift", "upsample_type": "resnet", "downsample_type": "resnet", } UpperCamelCase : Any = { "sample_size": 2_5_6, "in_channels": 3, "out_channels": 3, "layers_per_block": 2, "num_class_embeds": None, "block_out_channels": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4], "attention_head_dim": 6_4, "down_block_types": [ "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", "AttnDownBlock2D", ], "up_block_types": [ "AttnUpBlock2D", "AttnUpBlock2D", "AttnUpBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D", ], "resnet_time_scale_shift": "default", "upsample_type": "resnet", "downsample_type": "resnet", } UpperCamelCase : str = { "num_train_timesteps": 4_0, "sigma_min": 0.0_02, "sigma_max": 80.0, } UpperCamelCase : Union[str, Any] = { "num_train_timesteps": 2_0_1, "sigma_min": 0.0_02, "sigma_max": 80.0, } UpperCamelCase : Any = { "num_train_timesteps": 1_5_1, "sigma_min": 0.0_02, "sigma_max": 80.0, } def A ( snake_case :Dict ) -> int: if isinstance(snake_case , snake_case ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def A ( snake_case :List[str] , snake_case :List[Any] , snake_case :Dict , snake_case :List[str] , snake_case :Any=False ) -> Optional[Any]: __UpperCamelCase = checkpoint[f'{old_prefix}.in_layers.0.weight'] __UpperCamelCase = checkpoint[f'{old_prefix}.in_layers.0.bias'] __UpperCamelCase = checkpoint[f'{old_prefix}.in_layers.2.weight'] __UpperCamelCase = checkpoint[f'{old_prefix}.in_layers.2.bias'] __UpperCamelCase = checkpoint[f'{old_prefix}.emb_layers.1.weight'] __UpperCamelCase = checkpoint[f'{old_prefix}.emb_layers.1.bias'] __UpperCamelCase = checkpoint[f'{old_prefix}.out_layers.0.weight'] __UpperCamelCase = checkpoint[f'{old_prefix}.out_layers.0.bias'] __UpperCamelCase = checkpoint[f'{old_prefix}.out_layers.3.weight'] __UpperCamelCase = checkpoint[f'{old_prefix}.out_layers.3.bias'] if has_skip: __UpperCamelCase = checkpoint[f'{old_prefix}.skip_connection.weight'] __UpperCamelCase = checkpoint[f'{old_prefix}.skip_connection.bias'] return new_checkpoint def A ( snake_case :Union[str, Any] , snake_case :str , snake_case :List[str] , snake_case :List[Any] , snake_case :Union[str, Any]=None ) -> List[str]: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = checkpoint[f'{old_prefix}.qkv.weight'].chunk(3 , dim=0 ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = checkpoint[f'{old_prefix}.qkv.bias'].chunk(3 , dim=0 ) __UpperCamelCase = checkpoint[f'{old_prefix}.norm.weight'] __UpperCamelCase = checkpoint[f'{old_prefix}.norm.bias'] __UpperCamelCase = weight_q.squeeze(-1 ).squeeze(-1 ) __UpperCamelCase = bias_q.squeeze(-1 ).squeeze(-1 ) __UpperCamelCase = weight_k.squeeze(-1 ).squeeze(-1 ) __UpperCamelCase = bias_k.squeeze(-1 ).squeeze(-1 ) __UpperCamelCase = weight_v.squeeze(-1 ).squeeze(-1 ) __UpperCamelCase = bias_v.squeeze(-1 ).squeeze(-1 ) __UpperCamelCase = ( checkpoint[f'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 ) ) __UpperCamelCase = checkpoint[f'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def A ( snake_case :str , snake_case :List[Any] ) -> Tuple: __UpperCamelCase = torch.load(snake_case , map_location='cpu' ) __UpperCamelCase = {} __UpperCamelCase = checkpoint['time_embed.0.weight'] __UpperCamelCase = checkpoint['time_embed.0.bias'] __UpperCamelCase = checkpoint['time_embed.2.weight'] __UpperCamelCase = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: __UpperCamelCase = checkpoint['label_emb.weight'] __UpperCamelCase = checkpoint['input_blocks.0.0.weight'] __UpperCamelCase = checkpoint['input_blocks.0.0.bias'] __UpperCamelCase = unet_config['down_block_types'] __UpperCamelCase = unet_config['layers_per_block'] __UpperCamelCase = unet_config['attention_head_dim'] __UpperCamelCase = unet_config['block_out_channels'] __UpperCamelCase = 1 __UpperCamelCase = channels_list[0] for i, layer_type in enumerate(snake_case ): __UpperCamelCase = channels_list[i] __UpperCamelCase = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(snake_case ): __UpperCamelCase = f'down_blocks.{i}.resnets.{j}' __UpperCamelCase = f'input_blocks.{current_layer}.0' __UpperCamelCase = True if j == 0 and downsample_block_has_skip else False __UpperCamelCase = convert_resnet(snake_case , snake_case , snake_case , snake_case , has_skip=snake_case ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(snake_case ): __UpperCamelCase = f'down_blocks.{i}.resnets.{j}' __UpperCamelCase = f'input_blocks.{current_layer}.0' __UpperCamelCase = True if j == 0 and downsample_block_has_skip else False __UpperCamelCase = convert_resnet(snake_case , snake_case , snake_case , snake_case , has_skip=snake_case ) __UpperCamelCase = f'down_blocks.{i}.attentions.{j}' __UpperCamelCase = f'input_blocks.{current_layer}.1' __UpperCamelCase = convert_attention( snake_case , snake_case , snake_case , snake_case , snake_case ) current_layer += 1 if i != len(snake_case ) - 1: __UpperCamelCase = f'down_blocks.{i}.downsamplers.0' __UpperCamelCase = f'input_blocks.{current_layer}.0' __UpperCamelCase = convert_resnet(snake_case , snake_case , snake_case , snake_case ) current_layer += 1 __UpperCamelCase = current_channels # hardcoded the mid-block for now __UpperCamelCase = 'mid_block.resnets.0' __UpperCamelCase = 'middle_block.0' __UpperCamelCase = convert_resnet(snake_case , snake_case , snake_case , snake_case ) __UpperCamelCase = 'mid_block.attentions.0' __UpperCamelCase = 'middle_block.1' __UpperCamelCase = convert_attention(snake_case , snake_case , snake_case , snake_case , snake_case ) __UpperCamelCase = 'mid_block.resnets.1' __UpperCamelCase = 'middle_block.2' __UpperCamelCase = convert_resnet(snake_case , snake_case , snake_case , snake_case ) __UpperCamelCase = 0 __UpperCamelCase = unet_config['up_block_types'] for i, layer_type in enumerate(snake_case ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): __UpperCamelCase = f'up_blocks.{i}.resnets.{j}' __UpperCamelCase = f'output_blocks.{current_layer}.0' __UpperCamelCase = convert_resnet(snake_case , snake_case , snake_case , snake_case , has_skip=snake_case ) current_layer += 1 if i != len(snake_case ) - 1: __UpperCamelCase = f'up_blocks.{i}.upsamplers.0' __UpperCamelCase = f'output_blocks.{current_layer-1}.1' __UpperCamelCase = convert_resnet(snake_case , snake_case , snake_case , snake_case ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): __UpperCamelCase = f'up_blocks.{i}.resnets.{j}' __UpperCamelCase = f'output_blocks.{current_layer}.0' __UpperCamelCase = convert_resnet(snake_case , snake_case , snake_case , snake_case , has_skip=snake_case ) __UpperCamelCase = f'up_blocks.{i}.attentions.{j}' __UpperCamelCase = f'output_blocks.{current_layer}.1' __UpperCamelCase = convert_attention( snake_case , snake_case , snake_case , snake_case , snake_case ) current_layer += 1 if i != len(snake_case ) - 1: __UpperCamelCase = f'up_blocks.{i}.upsamplers.0' __UpperCamelCase = f'output_blocks.{current_layer-1}.2' __UpperCamelCase = convert_resnet(snake_case , snake_case , snake_case , snake_case ) __UpperCamelCase = checkpoint['out.0.weight'] __UpperCamelCase = checkpoint['out.0.bias'] __UpperCamelCase = checkpoint['out.2.weight'] __UpperCamelCase = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": UpperCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.") parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model." ) parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.") UpperCamelCase : Dict = parser.parse_args() UpperCamelCase : List[Any] = strabool(args.class_cond) UpperCamelCase : List[str] = os.path.basename(args.unet_path) print(f'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: UpperCamelCase : Optional[int] = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): UpperCamelCase : int = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: UpperCamelCase : int = TEST_UNET_CONFIG else: raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: UpperCamelCase : Optional[int] = None UpperCamelCase : Dict = con_pt_to_diffuser(args.unet_path, unet_config) UpperCamelCase : List[Any] = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: UpperCamelCase : str = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: UpperCamelCase : int = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): UpperCamelCase : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''') UpperCamelCase : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config) UpperCamelCase : Tuple = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
316
"""simple docstring""" def A ( snake_case :list[int] , snake_case :int ) -> bool: __UpperCamelCase = len(snake_case ) __UpperCamelCase = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): __UpperCamelCase = True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): __UpperCamelCase = False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: __UpperCamelCase = subset[i - 1][j] if arr[i - 1] <= j: __UpperCamelCase = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
316
1
"""simple docstring""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCamelCase : Optional[Any] = logging.get_logger(__name__) logging.set_verbosity_info() def A ( snake_case :str , snake_case :str ) -> Any: if "xprophetnet" in prophetnet_checkpoint_path: __UpperCamelCase = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case ) __UpperCamelCase , __UpperCamelCase = XLMProphetNetForConditionalGeneration.from_pretrained( snake_case , output_loading_info=snake_case ) else: __UpperCamelCase = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case ) __UpperCamelCase , __UpperCamelCase = ProphetNetForConditionalGeneration.from_pretrained( snake_case , output_loading_info=snake_case ) __UpperCamelCase = ['key_proj', 'value_proj', 'query_proj'] __UpperCamelCase = { 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in loading_info["missing_keys"]: __UpperCamelCase = key.split('.' ) if attributes[0] == "lm_head": __UpperCamelCase = prophet __UpperCamelCase = prophet_old else: __UpperCamelCase = prophet.prophetnet __UpperCamelCase = prophet_old.model __UpperCamelCase = False for attribute in attributes: if attribute in mapping: __UpperCamelCase = mapping[attribute] if not hasattr(snake_case , snake_case ) and len(snake_case ) > 0: __UpperCamelCase = attribute elif hasattr(snake_case , snake_case ): __UpperCamelCase = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" __UpperCamelCase = old_model.weight logger.info(f'{attribute} is initialized.' ) __UpperCamelCase = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" __UpperCamelCase = old_model.bias logger.info(f'{attribute} is initialized' ) __UpperCamelCase = True break elif attribute in special_keys and hasattr(snake_case , 'in_proj_weight' ): __UpperCamelCase = old_model.in_proj_weight.shape[0] // 3 __UpperCamelCase = getattr(snake_case , snake_case ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": __UpperCamelCase = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) __UpperCamelCase = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": __UpperCamelCase = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) __UpperCamelCase = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": __UpperCamelCase = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) __UpperCamelCase = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) __UpperCamelCase = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings." __UpperCamelCase = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] ) __UpperCamelCase = True break if attribute.isdigit(): __UpperCamelCase = model[int(snake_case )] __UpperCamelCase = old_model[int(snake_case )] else: __UpperCamelCase = getattr(snake_case , snake_case ) if old_attribute == "": __UpperCamelCase = old_model else: if not hasattr(snake_case , snake_case ): raise ValueError(f'{old_model} does not have {old_attribute}' ) __UpperCamelCase = getattr(snake_case , snake_case ) if not is_key_init: raise ValueError(f'{key} was not correctly initialized!' ) print(f'Saving model to {pytorch_dump_folder_path}' ) prophet.save_pretrained(snake_case ) if __name__ == "__main__": UpperCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) UpperCamelCase : Optional[Any] = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
316
"""simple docstring""" import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCamelCase : Union[str, Any] = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") UpperCamelCase : int = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeqaSeqLM, "translation": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCamelCase : Optional[Any] = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCamelCase : str = sorted(arg_to_scheduler.keys()) UpperCamelCase : List[str] = "{" + ", ".join(arg_to_scheduler_choices) + "}" class __lowerCAmelCase ( pl.LightningModule ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="base" , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(__UpperCAmelCase ) __UpperCamelCase = 0 __UpperCamelCase = Path(self.hparams.output_dir ) __UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __UpperCamelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , ) else: __UpperCamelCase = config __UpperCamelCase = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ): assert hasattr(self.config , __UpperCAmelCase ), F'model config doesn\'t have a `{p}` attribute' setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) ) if tokenizer is None: __UpperCamelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , ) else: __UpperCamelCase = tokenizer __UpperCamelCase = MODEL_MODES[mode] if model is None: __UpperCamelCase = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , ) else: __UpperCamelCase = model def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler] __UpperCamelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __UpperCamelCase = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model __UpperCamelCase = ['bias', 'LayerNorm.weight'] __UpperCamelCase = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: __UpperCamelCase = Adafactor( __UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase ) else: __UpperCamelCase = AdamW( __UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __UpperCamelCase = optimizer __UpperCamelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' return self.validation_step(__UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return self.validation_end(__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' if stage == "test": __UpperCamelCase = len(self.test_dataloader().dataset ) else: __UpperCamelCase = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase ) __UpperCamelCase = len(self.train_dataloader().dataset ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False ): '''simple docstring''' raise NotImplementedError('You must implement this for your task' ) def UpperCAmelCase ( self ): '''simple docstring''' return self.train_loader def UpperCAmelCase ( self ): '''simple docstring''' return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( __UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = self.output_dir.joinpath('best_tfmr' ) __UpperCamelCase = self.step_count self.model.save_pretrained(__UpperCAmelCase ) self.tokenizer.save_pretrained(__UpperCAmelCase ) @staticmethod def UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' parser.add_argument( '--model_name_or_path' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=__UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(__UpperCAmelCase ).parent / 'test_run' / 'cache' ) , type=__UpperCAmelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=__UpperCAmelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=__UpperCAmelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=__UpperCAmelCase , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=__UpperCAmelCase , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=__UpperCAmelCase , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=__UpperCAmelCase , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=__UpperCAmelCase , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=__UpperCAmelCase , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=__UpperCAmelCase , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__UpperCAmelCase ) parser.add_argument('--train_batch_size' , default=32 , type=__UpperCAmelCase ) parser.add_argument('--eval_batch_size' , default=32 , type=__UpperCAmelCase ) parser.add_argument('--adafactor' , action='store_true' ) class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(__UpperCAmelCase ) class __lowerCAmelCase ( pl.Callback ): def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = trainer.lr_schedulers[0]['scheduler'] __UpperCamelCase = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' rank_zero_info('***** Validation results *****' ) __UpperCamelCase = trainer.callback_metrics # Log results for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' rank_zero_info('***** Test results *****' ) __UpperCamelCase = trainer.callback_metrics # Log and save results to file __UpperCamelCase = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(__UpperCAmelCase , 'w' ) as writer: for key in sorted(__UpperCAmelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(__UpperCAmelCase , str(metrics[key] ) ) ) def A ( snake_case :Any , snake_case :int ) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '--output_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'model_checkpoints' ) , type=snake_case , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=snake_case , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=snake_case ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=snake_case , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=snake_case , default=4_2 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(snake_case ).parent / 'test_run' / 'dummy-train-data' ) , type=snake_case , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , ) def A ( snake_case :BaseTransformer , snake_case :argparse.Namespace , snake_case :Union[str, Any]=None , snake_case :Union[str, Any]=True , snake_case :Any=[] , snake_case :Tuple=None , snake_case :List[str]=None , **snake_case :Union[str, Any] , ) -> Optional[int]: pl.seed_everything(args.seed ) # init model __UpperCamelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=snake_case ) # add custom checkpoints if checkpoint_callback is None: __UpperCamelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(snake_case ) if logging_callback is None: __UpperCamelCase = LoggingCallback() __UpperCamelCase = {} if args.fpaa: __UpperCamelCase = 1_6 if args.gpus > 1: __UpperCamelCase = 'auto' __UpperCamelCase = 'ddp' __UpperCamelCase = args.accumulate_grad_batches __UpperCamelCase = None __UpperCamelCase = 'auto' __UpperCamelCase = pl.Trainer.from_argparse_args( snake_case , weights_summary=snake_case , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=snake_case , val_check_interval=1 , num_sanity_val_steps=2 , **snake_case , ) if args.do_train: trainer.fit(snake_case ) else: print('RAG modeling tests with new set functions successfuly executed!' ) return trainer
316
1
"""simple docstring""" import numpy as np class __lowerCAmelCase : def __init__( self ): '''simple docstring''' __UpperCamelCase = (0, 0) __UpperCamelCase = None __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = 0 def __eq__( self , __UpperCAmelCase ): '''simple docstring''' return self.position == cell.position def UpperCAmelCase ( self ): '''simple docstring''' print(self.position ) class __lowerCAmelCase : def __init__( self , __UpperCAmelCase=(5, 5) ): '''simple docstring''' __UpperCamelCase = np.zeros(__UpperCAmelCase ) __UpperCamelCase = world_size[0] __UpperCamelCase = world_size[1] def UpperCAmelCase ( self ): '''simple docstring''' print(self.w ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = [ (-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1), ] __UpperCamelCase = cell.position[0] __UpperCamelCase = cell.position[1] __UpperCamelCase = [] for n in neughbour_cord: __UpperCamelCase = current_x + n[0] __UpperCamelCase = current_y + n[1] if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit: __UpperCamelCase = Cell() __UpperCamelCase = (x, y) __UpperCamelCase = cell neighbours.append(__UpperCAmelCase ) return neighbours def A ( snake_case :Dict , snake_case :Tuple , snake_case :int ) -> Any: __UpperCamelCase = [] __UpperCamelCase = [] _open.append(snake_case ) while _open: __UpperCamelCase = np.argmin([n.f for n in _open] ) __UpperCamelCase = _open[min_f] _closed.append(_open.pop(snake_case ) ) if current == goal: break for n in world.get_neigbours(snake_case ): for c in _closed: if c == n: continue __UpperCamelCase = current.g + 1 __UpperCamelCase , __UpperCamelCase = n.position __UpperCamelCase , __UpperCamelCase = goal.position __UpperCamelCase = (ya - ya) ** 2 + (xa - xa) ** 2 __UpperCamelCase = n.h + n.g for c in _open: if c == n and c.f < n.f: continue _open.append(snake_case ) __UpperCamelCase = [] while current.parent is not None: path.append(current.position ) __UpperCamelCase = current.parent path.append(current.position ) return path[::-1] if __name__ == "__main__": UpperCamelCase : Optional[int] = Gridworld() # Start position and goal UpperCamelCase : List[Any] = Cell() UpperCamelCase : int = (0, 0) UpperCamelCase : str = Cell() UpperCamelCase : Optional[int] = (4, 4) print(f'''path from {start.position} to {goal.position}''') UpperCamelCase : Optional[Any] = astar(world, start, goal) # Just for visual reasons. for i in s: UpperCamelCase : Tuple = 1 print(world.w)
316
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase : Dict = { "vocab_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", }, "merges_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", }, "tokenizer_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json", }, } UpperCamelCase : Dict = { "gpt2": 1_0_2_4, "gpt2-medium": 1_0_2_4, "gpt2-large": 1_0_2_4, "gpt2-xl": 1_0_2_4, "distilgpt2": 1_0_2_4, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ["input_ids", "attention_mask"] lowercase = GPTaTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase ) __UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space: __UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) ) __UpperCamelCase = add_prefix_space __UpperCamelCase = pre_tok_class(**__UpperCAmelCase ) __UpperCamelCase = add_prefix_space def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] ) if len(__UpperCAmelCase ) > self.model_max_length: __UpperCamelCase = input_ids[-self.model_max_length :] return input_ids
316
1
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase : Dict = logging.get_logger(__name__) UpperCamelCase : Union[str, Any] = { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json", } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = "mvp" lowercase = ["past_key_values"] lowercase = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , __UpperCAmelCase=5_0267 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=False , __UpperCAmelCase=100 , __UpperCAmelCase=800 , **__UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = vocab_size __UpperCamelCase = max_position_embeddings __UpperCamelCase = d_model __UpperCamelCase = encoder_ffn_dim __UpperCamelCase = encoder_layers __UpperCamelCase = encoder_attention_heads __UpperCamelCase = decoder_ffn_dim __UpperCamelCase = decoder_layers __UpperCamelCase = decoder_attention_heads __UpperCamelCase = dropout __UpperCamelCase = attention_dropout __UpperCamelCase = activation_dropout __UpperCamelCase = activation_function __UpperCamelCase = init_std __UpperCamelCase = encoder_layerdrop __UpperCamelCase = decoder_layerdrop __UpperCamelCase = classifier_dropout __UpperCamelCase = use_cache __UpperCamelCase = encoder_layers __UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True __UpperCamelCase = use_prompt __UpperCamelCase = prompt_length __UpperCamelCase = prompt_mid_dim super().__init__( pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , __UpperCAmelCase ): __UpperCamelCase = self.bos_token_id warnings.warn( F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ' 'The config can simply be saved and uploaded again to be fixed.' )
316
"""simple docstring""" import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def A ( snake_case :str , snake_case :tuple , snake_case :Path , snake_case :Dict , snake_case :int , snake_case :List[str] , snake_case :Union[str, Any] , snake_case :Union[str, Any]=False , ) -> str: output_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , use_external_data_format=snake_case , enable_onnx_checker=snake_case , opset_version=snake_case , ) else: export( snake_case , snake_case , f=output_path.as_posix() , input_names=snake_case , output_names=snake_case , dynamic_axes=snake_case , do_constant_folding=snake_case , opset_version=snake_case , ) @torch.no_grad() def A ( snake_case :str , snake_case :str , snake_case :int , snake_case :bool = False ) -> List[str]: __UpperCamelCase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): __UpperCamelCase = 'cuda' elif fpaa and not torch.cuda.is_available(): raise ValueError('`float16` model export is only supported on GPUs with CUDA' ) else: __UpperCamelCase = 'cpu' __UpperCamelCase = Path(snake_case ) # VAE DECODER __UpperCamelCase = AutoencoderKL.from_pretrained(model_path + '/vae' ) __UpperCamelCase = vae_decoder.config.latent_channels # forward only through the decoder part __UpperCamelCase = vae_decoder.decode onnx_export( snake_case , model_args=( torch.randn(1 , snake_case , 2_5 , 2_5 ).to(device=snake_case , dtype=snake_case ), False, ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={ 'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'}, } , opset=snake_case , ) del vae_decoder if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=1_4, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") UpperCamelCase : List[Any] = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("SD: Done: ONNX")
316
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase : Optional[Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Dict = ["XGLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : int = ["XGLMTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Dict = [ "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[Any] = [ "FlaxXGLMForCausalLM", "FlaxXGLMModel", "FlaxXGLMPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[Any] = [ "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXGLMForCausalLM", "TFXGLMModel", "TFXGLMPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys UpperCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure)
316
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path UpperCamelCase : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) UpperCamelCase : list[int] = [ord(letter) for letter in string.ascii_lowercase] UpperCamelCase : set[int] = {ord(char) for char in VALID_CHARS} UpperCamelCase : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def A ( snake_case :list[int] , snake_case :tuple[int, ...] ) -> str | None: __UpperCamelCase = "" __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 for keychar, cipherchar in zip(cycle(snake_case ) , snake_case ): __UpperCamelCase = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(snake_case ) return decoded def A ( snake_case :list[int] ) -> list[str]: __UpperCamelCase = [] for key in product(snake_case , repeat=3 ): __UpperCamelCase = try_key(snake_case , snake_case ) if encoded is not None: possibles.append(snake_case ) return possibles def A ( snake_case :list[str] , snake_case :str ) -> list[str]: return [possible for possible in possibles if common_word in possible.lower()] def A ( snake_case :str = "p059_cipher.txt" ) -> int: __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = Path(snake_case ).parent.joinpath(snake_case ).read_text(encoding='utf-8' ) __UpperCamelCase = [int(snake_case ) for number in data.strip().split(',' )] __UpperCamelCase = filter_valid_chars(snake_case ) for common_word in COMMON_WORDS: __UpperCamelCase = filter_common_word(snake_case , snake_case ) if len(snake_case ) == 1: break __UpperCamelCase = possibles[0] return sum(ord(snake_case ) for char in decoded_text ) if __name__ == "__main__": print(f'''{solution() = }''')
316
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_gpta import GPTaTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase : Any = logging.get_logger(__name__) UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCamelCase : Dict = { "vocab_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json", }, "merges_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt", }, "tokenizer_file": { "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json", }, } UpperCamelCase : Dict = { "gpt2": 1_0_2_4, "gpt2-medium": 1_0_2_4, "gpt2-large": 1_0_2_4, "gpt2-xl": 1_0_2_4, "distilgpt2": 1_0_2_4, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ["input_ids", "attention_mask"] lowercase = GPTaTokenizer def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase="<|endoftext|>" , __UpperCAmelCase=False , **__UpperCAmelCase , ): '''simple docstring''' super().__init__( __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) __UpperCamelCase = kwargs.pop('add_bos_token' , __UpperCAmelCase ) __UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , __UpperCAmelCase ) != add_prefix_space: __UpperCamelCase = getattr(__UpperCAmelCase , pre_tok_state.pop('type' ) ) __UpperCamelCase = add_prefix_space __UpperCamelCase = pre_tok_class(**__UpperCAmelCase ) __UpperCamelCase = add_prefix_space def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = kwargs.get('is_split_into_words' , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ): '''simple docstring''' __UpperCamelCase = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [self.eos_token_id] ) if len(__UpperCAmelCase ) > self.model_max_length: __UpperCamelCase = input_ids[-self.model_max_length :] return input_ids
316
"""simple docstring""" UpperCamelCase : dict[str, float] = { "km/h": 1.0, "m/s": 3.6, "mph": 1.60_93_44, "knot": 1.8_52, } UpperCamelCase : dict[str, float] = { "km/h": 1.0, "m/s": 0.2_77_77_77_78, "mph": 0.6_21_37_11_92, "knot": 0.5_39_95_68_03, } def A ( snake_case :float , snake_case :str , snake_case :str ) -> float: if unit_to not in speed_chart or unit_from not in speed_chart_inverse: __UpperCamelCase = ( f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n' f'Valid values are: {", ".join(snake_case )}' ) raise ValueError(snake_case ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
316
1
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class __lowerCAmelCase : lowercase = 42 lowercase = None lowercase = None def A ( ) -> Node | None: __UpperCamelCase = Node(1 ) __UpperCamelCase = Node(2 ) __UpperCamelCase = Node(3 ) __UpperCamelCase = Node(4 ) __UpperCamelCase = Node(5 ) return tree def A ( snake_case :Node | None ) -> list[int]: return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def A ( snake_case :Node | None ) -> list[int]: return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def A ( snake_case :Node | None ) -> list[int]: return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def A ( snake_case :Node | None ) -> int: return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def A ( snake_case :Node | None ) -> Sequence[Node | None]: __UpperCamelCase = [] if root is None: return output __UpperCamelCase = deque([root] ) while process_queue: __UpperCamelCase = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def A ( snake_case :Node | None , snake_case :int ) -> Sequence[Node | None]: __UpperCamelCase = [] def populate_output(snake_case :Node | None , snake_case :int ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(snake_case , snake_case ) return output def A ( snake_case :Node | None , snake_case :int ) -> Sequence[Node | None]: __UpperCamelCase = [] def populate_output(snake_case :Node | None , snake_case :int ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(snake_case , snake_case ) return output def A ( snake_case :Node | None ) -> Sequence[Node | None] | list[Any]: if root is None: return [] __UpperCamelCase = [] __UpperCamelCase = 0 __UpperCamelCase = height(snake_case ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(snake_case , snake_case ) ) __UpperCamelCase = 1 else: output.append(get_nodes_from_right_to_left(snake_case , snake_case ) ) __UpperCamelCase = 0 return output def A ( ) -> None: # Main function for testing. __UpperCamelCase = make_tree() print(f'In-order Traversal: {inorder(snake_case )}' ) print(f'Pre-order Traversal: {preorder(snake_case )}' ) print(f'Post-order Traversal: {postorder(snake_case )}' , '\n' ) print(f'Height of Tree: {height(snake_case )}' , '\n' ) print('Complete Level Order Traversal: ' ) print(level_order(snake_case ) , '\n' ) print('Level-wise order Traversal: ' ) for level in range(1 , height(snake_case ) + 1 ): print(f'Level {level}:' , get_nodes_from_left_to_right(snake_case , level=snake_case ) ) print('\nZigZag order Traversal: ' ) print(zigzag(snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
316
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = IFInpaintingSuperResolutionPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} ) lowercase = PipelineTesterMixin.required_optional_params - {"latents"} def UpperCAmelCase ( self ): '''simple docstring''' return self._get_superresolution_dummy_components() def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ): '''simple docstring''' if str(__UpperCAmelCase ).startswith('mps' ): __UpperCamelCase = torch.manual_seed(__UpperCAmelCase ) else: __UpperCamelCase = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) __UpperCamelCase = floats_tensor((1, 3, 16, 16) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) __UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) __UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase ) __UpperCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def UpperCAmelCase ( self ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCAmelCase ( self ): '''simple docstring''' self._test_save_load_local() def UpperCAmelCase ( self ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
316
1
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_input_mask __UpperCamelCase = use_token_type_ids __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = type_sequence_label_size __UpperCamelCase = initializer_range __UpperCamelCase = num_labels __UpperCamelCase = num_choices __UpperCamelCase = scope def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = None if self.use_token_type_ids: __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self ): '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase = LlamaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCamelCase = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = True __UpperCamelCase = LlamaModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): '''simple docstring''' __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCamelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['hidden_states'][0] __UpperCamelCase = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['hidden_states'][0] # select random slice __UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = config_and_inputs __UpperCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): lowercase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () lowercase = (LlamaForCausalLM,) if is_torch_available() else () lowercase = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = LlamaModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def UpperCAmelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCamelCase = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = 3 __UpperCamelCase = input_dict['input_ids'] __UpperCamelCase = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCamelCase = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = 3 __UpperCamelCase = 'single_label_classification' __UpperCamelCase = input_dict['input_ids'] __UpperCamelCase = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCamelCase = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = 3 __UpperCamelCase = 'multi_label_classification' __UpperCamelCase = input_dict['input_ids'] __UpperCamelCase = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCamelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCamelCase = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def UpperCAmelCase ( self ): '''simple docstring''' pass @parameterized.expand([('linear',), ('dynamic',)] ) def UpperCAmelCase ( self , __UpperCAmelCase ): '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = ids_tensor([1, 10] , config.vocab_size ) __UpperCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCamelCase = LlamaModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() __UpperCamelCase = original_model(__UpperCAmelCase ).last_hidden_state __UpperCamelCase = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCamelCase = {'type': scaling_type, 'factor': 1_0.0} __UpperCamelCase = LlamaModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() __UpperCamelCase = scaled_model(__UpperCAmelCase ).last_hidden_state __UpperCamelCase = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = [1, 306, 4658, 278, 6593, 310, 2834, 338] __UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' ) __UpperCamelCase = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __UpperCamelCase = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCamelCase = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = [1, 306, 4658, 278, 6593, 310, 2834, 338] __UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' ) __UpperCamelCase = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCamelCase = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCamelCase = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = [1, 306, 4658, 278, 6593, 310, 2834, 338] __UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' ) __UpperCamelCase = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCamelCase = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCamelCase = torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = [1, 306, 4658, 278, 6593, 310, 2834, 338] __UpperCamelCase = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' ) __UpperCamelCase = model(torch.tensor(__UpperCAmelCase ) ) __UpperCamelCase = torch.tensor( [[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # fmt: off __UpperCamelCase = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip('Model is curently gated' ) @slow def UpperCAmelCase ( self ): '''simple docstring''' __UpperCamelCase = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' __UpperCamelCase = 'Simply put, the theory of relativity states that ' __UpperCamelCase = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) __UpperCamelCase = tokenizer.encode(__UpperCAmelCase , return_tensors='pt' ) __UpperCamelCase = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=__UpperCAmelCase ) # greedy generation outputs __UpperCamelCase = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase ) __UpperCamelCase = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
316
"""simple docstring""" def A ( snake_case :int ) -> int: __UpperCamelCase = [1] __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0 __UpperCamelCase = ugly_nums[ia] * 2 __UpperCamelCase = ugly_nums[ia] * 3 __UpperCamelCase = ugly_nums[ia] * 5 for _ in range(1 , snake_case ): __UpperCamelCase = min(snake_case , snake_case , snake_case ) ugly_nums.append(snake_case ) if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f'''{ugly_numbers(2_0_0) = }''')
316
1