Dataset schema:

column                   type    range
code                     string  length 87 – 55.2k
code_codestyle           int64   0 – 349
style_context            string  length 135 – 49.1k
style_context_codestyle  int64   0 – 349
label                    int64   0 – 1
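The rows below follow the schema in order: code, code_codestyle, style_context, style_context_codestyle, label. For illustration, a minimal sketch of iterating such a dataset with the `datasets` library — the dataset identifier here is a placeholder, not something recorded in this dump:

from datasets import load_dataset

ds = load_dataset("user/codestyle-pairs", split="train")  # hypothetical dataset id
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])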
code:
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
  author    = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
  title     = {Unbabel's Participation in the WMT20 Metrics Shared Task},
  booktitle = {Proceedings of the Fifth Conference on Machine Translation},
  month     = {November},
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  pages     = {909--918},
}
@inproceedings{rei-etal-2020-comet,
    title = "{COMET}: A Neural Framework for {MT} Evaluation",
    author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
    pages = "2685--2702",
}
"""

_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train
Machine Translation metrics that achieve high levels of correlation with different types of human judgments
(HTER, DA's or MQM). With the release of the framework the authors also released fully trained models that were
used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.
Args:
    `sources` (list of str): Source sentences
    `predictions` (list of str): candidate translations
    `references` (list of str): reference translations
    `cuda` (bool): If set to True, runs COMET using GPU
    `show_progress` (bool): Shows progress
    `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.
Examples:
    >>> comet_metric = datasets.load_metric('comet')
    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da')  # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        mean_score, scores = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
code_codestyle: 327
style_context:
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_to_bettertransformer_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_save_pretrained_requires_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            # saving a model that is still in BetterTransformer form must fail
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
style_context_codestyle: 327
label: 1
code:
def is_unique(input_str: str) -> bool:
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
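A quick sanity check of the bitmap approach (illustrative inputs, not from the source):

print(is_unique("ABCDE"))   # True: every character is distinct
print(is_unique("ABCDEA"))  # False: 'A' repeats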
code_codestyle: 327
style_context:
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Dataset wrapping language-modeling sequences; each item is (token_ids, length)."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        # sanity checks: lengths must match the stored token id arrays
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split into chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Sequences that are too short are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high proportion of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only on the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
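For context, a hypothetical sketch of the `params` namespace this dataset expects; the field names come from the usages above, but the concrete values are illustrative only:

from types import SimpleNamespace

import numpy as np

params = SimpleNamespace(
    max_model_input_size=128,
    mlm=True,
    special_tok_ids={"cls_token": 101, "sep_token": 102, "pad_token": 0, "unk_token": 100},
    is_master=True,
)
# one toy sequence longer than the 11-token minimum: [CLS] ... [SEP]
data = [np.array([101, *range(1000, 1012), 102])]
dataset = LmSeqsDataset(params, data)
print(len(dataset))  # 1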
style_context_codestyle: 327
label: 1
code:
import argparse

import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download

from transformers import (
    CLIPTokenizer,
    CLIPTokenizerFast,
    VideoMAEImageProcessor,
    XCLIPConfig,
    XCLIPModel,
    XCLIPProcessor,
    XCLIPTextConfig,
    XCLIPVisionConfig,
)


def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config


def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")

    return name


def convert_state_dict(orig_state_dict, config):
    # split fused in_proj q/k/v weights into separate projections; everything else is renamed
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    prefix = f"vision_model.encoder.layers.{layer_num}.message_attn"
                else:
                    prefix = f"vision_model.encoder.layers.{layer_num}.self_attn"
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                prefix = f"mit.encoder.layers.{layer_num}.self_attn"
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict


def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video",
        filename=filename,
        repo_type="dataset",
    )
    video = np.load(file)
    return list(video)


def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }

    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="xclip-base-patch32",
        type=str,
        help="Name of the model.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
code_codestyle: 327
style_context:
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
style_context_codestyle: 327
label: 1
code:
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model.\n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
code_codestyle: 327
style_context:
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
style_context_codestyle: 327
label: 1
code:
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True

    # 1. Get the midpoint (slow)
    fast = slow = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next

    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)

    # 3. Comparison
    cur = head
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next

    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
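The three checks above operate on a bare singly linked list. For them to run, a node type along these lines is assumed (not part of the original snippet):

class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

head = ListNode(1, ListNode(2, ListNode(1)))
print(is_palindrome_stack(head))  # True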
code_codestyle: 327
style_context:
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
style_context_codestyle: 327
label: 1
code:
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
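For orientation, a brief sketch of instantiating this configuration (assuming the class ships in `transformers` as `VivitConfig`; the printed defaults mirror the arguments above):

from transformers import VivitConfig

config = VivitConfig()
print(config.hidden_size, config.num_frames, config.tubelet_size)  # 768 32 [2, 16, 16]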
code_codestyle: 327
style_context:
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
style_context_codestyle: 327
label: 1
code:
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    # d4 must be even
    if num[3] % 2 != 0:
        return False

    # d3 + d4 + d5 must be divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    # d6 must be 0 or 5
    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 327
style_context:
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
style_context_codestyle: 327
label: 1
code:
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
code_codestyle: 327
style_context:
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
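A short usage sketch of the fast tokenizer, using the checkpoint named in the pretrained maps above (network access assumed):

from transformers import LxmertTokenizerFast

tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
encoding = tokenizer("Who is eating the apple?")
print(encoding["input_ids"][:3])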
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def __init__( self : List[Any] , _A : Optional[int]=-1 ) -> Dict: """simple docstring""" snake_case_ : str = label_idx def UpperCAmelCase_ ( self : Optional[int] , _A : Dict , _A : Union[Split, str] ) -> List[InputExample]: """simple docstring""" if isinstance(_A , _A ): snake_case_ : int = mode.value snake_case_ : Union[str, Any] = os.path.join(_A , F"""{mode}.txt""" ) snake_case_ : Dict = 1 snake_case_ : Optional[int] = [] with open(_A , encoding='utf-8' ) as f: snake_case_ : str = [] snake_case_ : List[str] = [] for line in f: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_A , labels=_A ) ) guid_index += 1 snake_case_ : List[str] = [] snake_case_ : List[Any] = [] else: snake_case_ : List[str] = line.split(' ' ) words.append(splits[0] ) if len(_A ) > 1: labels.append(splits[self.label_idx].replace('\n' , '' ) ) else: # Examples could have no label for mode = "test" labels.append('O' ) if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_A , labels=_A ) ) return examples def UpperCAmelCase_ ( self : Any , _A : TextIO , _A : TextIO , _A : List ) -> Dict: """simple docstring""" snake_case_ : int = 0 for line in test_input_reader: if line.startswith('-DOCSTART-' ) or line == "" or line == "\n": writer.write(_A ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: snake_case_ : Dict = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n' writer.write(_A ) else: logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' 
, line.split()[0] ) def UpperCAmelCase_ ( self : List[str] , _A : str ) -> List[str]: """simple docstring""" if path: with open(_A , 'r' ) as f: snake_case_ : Optional[int] = f.read().splitlines() if "O" not in labels: snake_case_ : Dict = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def __init__( self : Tuple ) -> Tuple: """simple docstring""" super().__init__(label_idx=-2 ) def UpperCAmelCase_ ( self : List[Any] , _A : str ) -> List[str]: """simple docstring""" if path: with open(_A , 'r' ) as f: snake_case_ : Optional[Any] = f.read().splitlines() if "O" not in labels: snake_case_ : Optional[Any] = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def UpperCAmelCase_ ( self : List[str] , _A : Tuple , _A : Union[Split, str] ) -> List[InputExample]: """simple docstring""" if isinstance(_A , _A ): snake_case_ : Tuple = mode.value snake_case_ : Tuple = os.path.join(_A , F"""{mode}.txt""" ) snake_case_ : Optional[Any] = 1 snake_case_ : List[str] = [] with open(_A , encoding='utf-8' ) as f: for sentence in parse_incr(_A ): snake_case_ : int = [] snake_case_ : Dict = [] for token in sentence: words.append(token['form'] ) labels.append(token['upos'] ) assert len(_A ) == len(_A ) if words: examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_A , labels=_A ) ) guid_index += 1 return examples def UpperCAmelCase_ ( self : List[Any] , _A : TextIO , _A : TextIO , _A : List ) -> int: """simple docstring""" snake_case_ : List[str] = 0 for sentence in parse_incr(_A ): snake_case_ : Tuple = preds_list[example_id] snake_case_ : Tuple = '' for token in sentence: out += F"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """ out += "\n" writer.write(_A ) example_id += 1 def UpperCAmelCase_ ( self : Optional[Any] , _A : str ) -> List[str]: """simple docstring""" if path: with open(_A , 'r' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
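The sentence-grouping loop in the first reader is easiest to follow on a toy input. Here is a minimal self-contained sketch of the same CoNLL-style logic, using plain tuples instead of InputExample:

conll_lines = ["EU B-ORG", "rejects O", "", "German B-MISC", "call O"]
examples, words, labels = [], [], []
for line in conll_lines + [""]:  # trailing "" flushes the last sentence
    if line.startswith("-DOCSTART-") or line == "":
        if words:
            examples.append((words, labels))
            words, labels = [], []
    else:
        splits = line.split(" ")
        words.append(splits[0])
        labels.append(splits[-1] if len(splits) > 1 else "O")
print(examples)
# [(['EU', 'rejects'], ['B-ORG', 'O']), (['German', 'call'], ['B-MISC', 'O'])]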
def is_automorphic_number(number: int) -> bool:
    # Hypothetical name for this de-obfuscated function: it checks whether
    # number ** 2 ends in the same digits as number (an automorphic number).
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
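A few spot checks of the predicate (5² = 25, 6² = 36, 25² = 625 and 76² = 5776 all end in the original number; 7² = 49 does not):

for n in (5, 6, 7, 25, 76):
    print(n, is_automorphic_number(n))  # True, True, False, True, True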
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , _A : Union[str, Any] , _A : int=3 , _A : Optional[int]=32 , _A : Dict=3 , _A : List[str]=10 , _A : Union[str, Any]=[10, 20, 30, 40] , _A : Union[str, Any]=[1, 1, 2, 1] , _A : Optional[Any]=True , _A : Union[str, Any]=True , _A : str="relu" , _A : Tuple=3 , _A : List[Any]=None , ) -> Optional[int]: """simple docstring""" snake_case_ : List[str] = parent snake_case_ : List[Any] = batch_size snake_case_ : Tuple = image_size snake_case_ : Tuple = num_channels snake_case_ : Tuple = embeddings_size snake_case_ : Union[str, Any] = hidden_sizes snake_case_ : str = depths snake_case_ : int = is_training snake_case_ : str = use_labels snake_case_ : List[str] = hidden_act snake_case_ : int = num_labels snake_case_ : int = scope snake_case_ : List[Any] = len(_A ) def UpperCAmelCase_ ( self : int ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Union[str, Any] = None if self.use_labels: snake_case_ : int = ids_tensor([self.batch_size] , self.num_labels ) snake_case_ : Optional[int] = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self : Tuple ) -> List[str]: """simple docstring""" return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def UpperCAmelCase_ ( self : List[str] , _A : Optional[Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]: """simple docstring""" snake_case_ : Dict = TFResNetModel(config=_A ) snake_case_ : Any = model(_A ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def UpperCAmelCase_ ( self : Any , _A : Union[str, Any] , _A : Any , _A : int ) -> int: """simple docstring""" snake_case_ : Tuple = self.num_labels snake_case_ : List[Any] = TFResNetForImageClassification(_A ) snake_case_ : List[Any] = model(_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: """simple docstring""" snake_case_ : Any = self.prepare_config_and_inputs() snake_case_ ,snake_case_ ,snake_case_ : Union[str, Any] = config_and_inputs snake_case_ : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ): __magic_name__: Dict = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() 
else () __magic_name__: str = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) __magic_name__: List[Any] = False __magic_name__: Union[str, Any] = False __magic_name__: Union[str, Any] = False __magic_name__: int = False __magic_name__: Union[str, Any] = False def UpperCAmelCase_ ( self : Optional[int] ) -> Any: """simple docstring""" snake_case_ : int = TFResNetModelTester(self ) snake_case_ : Optional[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A ) def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase_ ( self : str ) -> Union[str, Any]: """simple docstring""" return @unittest.skip(reason='ResNet does not use inputs_embeds' ) def UpperCAmelCase_ ( self : str ) -> Union[str, Any]: """simple docstring""" pass @unittest.skip(reason='ResNet does not support input and output embeddings' ) def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(_A ) snake_case_ : List[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Dict = [*signature.parameters.keys()] snake_case_ : str = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: """simple docstring""" snake_case_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCAmelCase_ ( self : Any ) -> Dict: """simple docstring""" def check_hidden_states_output(_A : List[str] , _A : List[Any] , _A : str ): snake_case_ : Union[str, Any] = model_class(_A ) snake_case_ : str = model(**self._prepare_for_class(_A , _A ) ) snake_case_ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states snake_case_ : Tuple = self.model_tester.num_stages self.assertEqual(len(_A ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) snake_case_ ,snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[Any] = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: snake_case_ : Optional[int] = layer_type snake_case_ : Any = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : str = True check_hidden_states_output(_A , _A , _A ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def UpperCAmelCase_ ( self : 
Union[str, Any] ) -> Optional[Any]: """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Tuple = TFResNetModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def UpperCAmelCase_ ( self : Any ) -> List[str]: """simple docstring""" snake_case_ : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) snake_case_ : Union[str, Any] = self.default_image_processor snake_case_ : List[str] = prepare_img() snake_case_ : List[Any] = image_processor(images=_A , return_tensors='tf' ) # forward pass snake_case_ : Optional[Any] = model(**_A ) # verify the logits snake_case_ : int = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _A ) snake_case_ : Any = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _A , atol=1E-4 ) )
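Stripped of the test harness, the slow integration path above reduces to a few lines. A hedged sketch (network access assumed; "microsoft/resnet-50" is the first entry of TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST):

from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000), ImageNet classes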
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
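The _LazyModule indirection defers the torch-backed import until an attribute is first accessed. A small sketch of the user-visible effect, assuming torch is installed:

import transformers.models.autoformer as autoformer_module

# Nothing heavy has been imported yet; this attribute access triggers it.
config_cls = autoformer_module.AutoformerConfig
print(config_cls.model_type)  # "autoformer"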
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Any , _A : str , _A : Optional[Any]=13 , _A : Union[str, Any]=10 , _A : Dict=3 , _A : Union[str, Any]=2 , _A : List[Any]=2 , _A : List[str]=2 , _A : List[str]=True , _A : Dict=True , _A : Tuple=32 , _A : Tuple=5 , _A : Optional[int]=4 , _A : Dict=37 , _A : List[Any]="gelu" , _A : Any=0.1 , _A : Union[str, Any]=0.1 , _A : Dict=10 , _A : int=0.0_2 , _A : int=0.9 , _A : Tuple=None , ) -> List[Any]: """simple docstring""" snake_case_ : List[Any] = parent snake_case_ : str = batch_size snake_case_ : List[Any] = image_size snake_case_ : List[Any] = num_channels snake_case_ : List[str] = patch_size snake_case_ : Tuple = tubelet_size snake_case_ : List[Any] = num_frames snake_case_ : Optional[Any] = is_training snake_case_ : int = use_labels snake_case_ : Union[str, Any] = hidden_size snake_case_ : Optional[Any] = num_hidden_layers snake_case_ : int = num_attention_heads snake_case_ : Dict = intermediate_size snake_case_ : Optional[Any] = hidden_act snake_case_ : Any = hidden_dropout_prob snake_case_ : List[Any] = attention_probs_dropout_prob snake_case_ : Union[str, Any] = type_sequence_label_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : List[str] = mask_ratio snake_case_ : Union[str, Any] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame snake_case_ : List[Any] = (image_size // patch_size) ** 2 snake_case_ : Optional[int] = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos snake_case_ : Optional[int] = int(mask_ratio * self.seq_length ) def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Dict = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) snake_case_ : int = None if self.use_labels: snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : Any = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self : List[Any] ) -> Dict: """simple docstring""" return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , 
initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self : Tuple , _A : Dict , _A : Optional[int] , _A : Optional[Any] ) -> Any: """simple docstring""" snake_case_ : List[Any] = VideoMAEModel(config=_A ) model.to(_A ) model.eval() snake_case_ : str = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : Union[str, Any] , _A : int , _A : List[Any] , _A : List[Any] ) -> int: """simple docstring""" snake_case_ : List[str] = VideoMAEForPreTraining(_A ) model.to(_A ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch snake_case_ : Dict = torch.ones((self.num_masks,) ) snake_case_ : List[Any] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) snake_case_ : Tuple = mask.expand(self.batch_size , -1 ).bool() snake_case_ : int = model(_A , _A ) # model only returns predictions for masked patches snake_case_ : Optional[int] = mask.sum().item() snake_case_ : List[Any] = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def UpperCAmelCase_ ( self : int ) -> Optional[Any]: """simple docstring""" snake_case_ : int = self.prepare_config_and_inputs() snake_case_ ,snake_case_ ,snake_case_ : List[str] = config_and_inputs snake_case_ : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ): __magic_name__: Tuple = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) __magic_name__: Optional[Any] = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) __magic_name__: List[str] = False __magic_name__: Tuple = False __magic_name__: Dict = False __magic_name__: List[Any] = False def UpperCAmelCase_ ( self : Dict ) -> str: """simple docstring""" snake_case_ : List[str] = VideoMAEModelTester(self ) snake_case_ : str = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def UpperCAmelCase_ ( self : Optional[Any] , _A : List[str] , _A : Any , _A : int=False ) -> Tuple: """simple docstring""" snake_case_ : Union[str, Any] = copy.deepcopy(_A ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch snake_case_ : Optional[Any] = torch.ones((self.model_tester.num_masks,) ) snake_case_ : Union[str, Any] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) snake_case_ : List[str] = mask.expand(self.model_tester.batch_size , -1 ).bool() snake_case_ : Optional[Any] = bool_masked_pos.to(_A ) if return_labels: if model_class in [ *get_values(_A ), ]: snake_case_ : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A ) return inputs_dict def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='VideoMAE does not use inputs_embeds' ) def UpperCAmelCase_ ( self : Tuple ) -> List[str]: """simple docstring""" pass def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" snake_case_ ,snake_case_ : List[Any] = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : str = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) snake_case_ : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear ) ) def UpperCAmelCase_ ( self : str ) -> List[Any]: """simple docstring""" snake_case_ ,snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Tuple = model_class(_A ) snake_case_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : List[str] = [*signature.parameters.keys()] snake_case_ : Union[str, Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict: """simple docstring""" snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_A ) @slow def UpperCAmelCase_ ( self : str ) -> int: """simple docstring""" for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Union[str, Any] = VideoMAEModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def UpperCAmelCase_ ( self : int ) -> List[str]: """simple docstring""" if not self.has_attentions: pass else: snake_case_ ,snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[Any] = True for model_class in self.all_model_classes: snake_case_ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks snake_case_ : Union[str, Any] = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) snake_case_ : Any = True snake_case_ : Tuple = False snake_case_ : Optional[Any] = True snake_case_ : Optional[int] = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): snake_case_ : Optional[Any] = model(**self._prepare_for_class(_A , _A ) ) snake_case_ : List[str] = outputs.attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ : List[Any] = True snake_case_ : Optional[Any] = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): snake_case_ : List[Any] = model(**self._prepare_for_class(_A , _A ) ) snake_case_ : Optional[int] = outputs.attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) snake_case_ : Tuple = len(_A ) # Check attention is always last and order is fine snake_case_ : Any = True snake_case_ : int = True snake_case_ : Tuple = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): snake_case_ : int = model(**self._prepare_for_class(_A , _A ) ) self.assertEqual(out_len + 1 , len(_A ) ) snake_case_ : Union[str, Any] = outputs.attentions self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]: """simple docstring""" def check_hidden_states_output(_A : 
Tuple , _A : int , _A : str ): snake_case_ : List[str] = model_class(_A ) model.to(_A ) model.eval() with torch.no_grad(): snake_case_ : Any = model(**self._prepare_for_class(_A , _A ) ) snake_case_ : List[Any] = outputs.hidden_states snake_case_ : Optional[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(_A ) , _A ) snake_case_ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks snake_case_ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) snake_case_ ,snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[str] = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : Union[str, Any] = True check_hidden_states_output(_A , _A , _A ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : List[str] = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) snake_case_ : List[str] = np.load(__a ) return list(__a ) @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def UpperCAmelCase_ ( self : List[str] ) -> int: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def UpperCAmelCase_ ( self : Optional[Any] ) -> int: """simple docstring""" snake_case_ : Any = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to( _A ) snake_case_ : List[Any] = self.default_image_processor snake_case_ : Optional[int] = prepare_video() snake_case_ : Optional[int] = image_processor(_A , return_tensors='pt' ).to(_A ) # forward pass with torch.no_grad(): snake_case_ : Union[str, Any] = model(**_A ) # verify the logits snake_case_ : Any = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape , _A ) snake_case_ : Optional[int] = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(_A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) ) @slow def UpperCAmelCase_ ( self : str ) -> Any: """simple docstring""" snake_case_ : Union[str, Any] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(_A ) snake_case_ : Optional[Any] = self.default_image_processor snake_case_ : List[str] = prepare_video() snake_case_ : List[Any] = image_processor(_A , return_tensors='pt' ).to(_A ) # add boolean mask, indicating which patches to mask snake_case_ : int = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) snake_case_ : Dict = torch.load(_A ) # forward pass with torch.no_grad(): snake_case_ : int = model(**_A ) # verify the logits snake_case_ : str = torch.Size([1, 1408, 1536] ) snake_case_ : Optional[Any] = torch.tensor( [[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=_A ) self.assertEqual(outputs.logits.shape , _A ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _A , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) 
snake_case_ : List[str] = torch.tensor([0.5_1_4_2] , device=_A ) self.assertTrue(torch.allclose(outputs.loss , _A , atol=1E-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) snake_case_ : Dict = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=_A ).to( _A ) with torch.no_grad(): snake_case_ : List[str] = model(**_A ) snake_case_ : Optional[Any] = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=_A ) self.assertTrue(torch.allclose(outputs.loss , _A , atol=1E-4 ) )
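The shared bool_masked_pos construction used throughout these tests is worth isolating. A minimal sketch with illustrative sizes (for a 224px, 16-frame input with tubelet size 2, seq_length is 8 * 196 = 1568; a 0.9 mask ratio masks int(0.9 * 1568) = 1411 positions):

import torch

seq_length, num_masks, batch_size = 1568, 1411, 2  # illustrative values
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()
print(bool_masked_pos.shape, int(bool_masked_pos[0].sum()))  # torch.Size([2, 1568]) 1411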
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
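Since this mirrors transformers' feature-extraction pipeline, the usual factory entry point applies. A hedged usage sketch (the model name is just a common example):

from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("This is a test.")
# Nested lists shaped [batch][tokens][hidden_size]
print(len(features[0]), len(features[0][0]))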
import argparse
import datetime


def zeller(date_input: str) -> str:
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
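For example, January 31st, 2010 fell on a Sunday:

print(zeller("01-31-2010"))  # Your date 01-31-2010, is a Sunday!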
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
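The filter can be checked against the worked example from the Project Euler 43 statement, 1406357289, which satisfies every required substring-divisibility property:

print(is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9)))  # True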
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
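Two quick checks: solving for voltage with P = 5 W and I = 2 A, then for power with V = I = 2:

print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
print(electric_power(voltage=2, current=2, power=0))  # result(name='power', value=4.0)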
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(42) _SCREAMING_SNAKE_CASE = """sshleifer/student_marian_en_ro_6_1""" _SCREAMING_SNAKE_CASE = """sshleifer/tiny-mbart""" @require_torch class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def UpperCAmelCase_ ( self : Union[str, Any] , _A : Union[str, Any]=False , _A : List[str]=None , _A : str=True , _A : Union[str, Any]=True , _A : Optional[int]=True , _A : int=True , ) -> Tuple: """simple docstring""" snake_case_ : Optional[int] = self.run_trainer( eval_steps=1 , max_len=12 , model_name=_A , num_train_epochs=1 , distributed=_A , extra_args_str=_A , predict_with_generate=_A , do_train=_A , do_eval=_A , do_predict=_A , ) snake_case_ : Dict = TrainerState.load_from_json(os.path.join(_A , 'trainer_state.json' ) ).log_history if not do_eval: return snake_case_ : str = [log for log in logs if 'eval_loss' in log.keys()] snake_case_ : Optional[Any] = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats snake_case_ : str = eval_metrics[-1] assert isinstance(last_step_stats['eval_bleu'] , _A ) assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def UpperCAmelCase_ ( self : str ) -> Optional[Any]: """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" self.run_seqaseq_quick(distributed=_A ) @require_torch_multi_gpu def UpperCAmelCase_ ( self : List[str] ) -> str: """simple docstring""" self.run_seqaseq_quick(distributed=_A ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]: """simple docstring""" self.run_seqaseq_quick(distributed=_A , extra_args_str='--sharded_ddp simple' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase_ ( self : Tuple ) -> Any: """simple docstring""" self.run_seqaseq_quick(distributed=_A , extra_args_str='--sharded_ddp simple --fp16' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase_ ( self : Any ) -> Any: """simple docstring""" self.run_seqaseq_quick(distributed=_A , extra_args_str='--sharded_ddp zero_dp_2' , predict_with_generate=_A ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" self.run_seqaseq_quick( distributed=_A , extra_args_str='--sharded_ddp zero_dp_2 --fp16' , predict_with_generate=_A ) @require_apex @require_torch_gpu def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: """simple 
docstring""" self.run_seqaseq_quick(distributed=_A , extra_args_str='--fp16 --fp16_backend=apex' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=_A , extra_args_str='--fp16 --fp16_backend=apex' ) @parameterized.expand(['base', 'low', 'high', 'mixed'] ) @require_torch_multi_gpu def UpperCAmelCase_ ( self : Tuple , _A : Optional[Any] ) -> Union[str, Any]: """simple docstring""" snake_case_ : Dict = { # test with the default log_level - should be info and thus log info once 'base': {'extra_args_str': '', 'n_matches': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes 'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica 'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1}, # test with high log_level and log_level_replica - should be quiet on all processes 'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0}, } snake_case_ : int = experiments[experiment_id] snake_case_ : Union[str, Any] = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False} snake_case_ : Union[str, Any] = 'Running training' with CaptureStderr() as cl: self.run_seqaseq_quick(**_A , extra_args_str=data['extra_args_str'] ) snake_case_ : List[Any] = len(re.findall(_A , cl.err ) ) self.assertEqual(_A , data['n_matches'] ) @slow def UpperCAmelCase_ ( self : Tuple ) -> int: """simple docstring""" snake_case_ : Dict = self.run_trainer( eval_steps=2 , max_len=128 , model_name=_A , learning_rate=3E-4 , num_train_epochs=10 , distributed=_A , ) # Check metrics snake_case_ : List[str] = TrainerState.load_from_json(os.path.join(_A , 'trainer_state.json' ) ).log_history snake_case_ : Dict = [log for log in logs if 'eval_loss' in log.keys()] snake_case_ : Any = eval_metrics[0] snake_case_ : Any = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['eval_bleu'] , _A ) # test if do_predict saves generations and metrics snake_case_ : Optional[int] = os.listdir(_A ) snake_case_ : List[Any] = {os.path.basename(_A ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def UpperCAmelCase_ ( self : Tuple ) -> str: """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(_A : str ) -> Tuple[int, float]: snake_case_ : Optional[Any] = '--skip_memory_metrics 0' snake_case_ : Union[str, Any] = self.run_trainer( max_len=128 , model_name=_A , learning_rate=3E-4 , num_train_epochs=1 , optim=_A , distributed=_A , extra_args_str=_A , do_eval=_A , do_predict=_A , n_gpus_to_use=1 , ) # Check metrics snake_case_ : str = TrainerState.load_from_json(Path(_A , 'trainer_state.json' ) ).log_history snake_case_ : Dict = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 ) snake_case_ : List[str] = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 ) snake_case_ : Optional[Any] = logs[0]['train_loss'] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss snake_case_ ,snake_case_ ,snake_case_ : Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) snake_case_ ,snake_case_ ,snake_case_ : List[str] = 
train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) snake_case_ : Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb snake_case_ : Optional[int] = gpu_peak_mem_orig + gpu_alloc_mem_orig snake_case_ : int = gpu_peak_mem_bnb + gpu_alloc_mem_bnb snake_case_ : int = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings snake_case_ : List[str] = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( _A , _A , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got' F""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" F""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( _A , _A , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got' F""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" F""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( _A , _A , F"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def UpperCAmelCase_ ( self : List[str] , _A : int , _A : str , _A : int , _A : float = 3E-3 , _A : str = "adafactor" , _A : bool = False , _A : str = None , _A : int = 0 , _A : bool = True , _A : bool = True , _A : bool = True , _A : bool = True , _A : int = None , ) -> Any: """simple docstring""" snake_case_ : int = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro' snake_case_ : int = self.get_auto_remove_tmp_dir() snake_case_ : str = F""" --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(_A )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(_A )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() snake_case_ : str = F""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(_A )} """.split() snake_case_ : Any = '\n --do_predict\n '.split() snake_case_ : Optional[int] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if 
predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"""--optim {optim}""".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: snake_case_ : Any = get_gpu_count() snake_case_ : Any = get_torch_dist_unique_port() snake_case_ : Union[str, Any] = F""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() snake_case_ : List[Any] = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_A , env=self.get_env() ) else: snake_case_ : Optional[Any] = ['run_translation.py'] + args with patch.object(_A , 'argv' , _A ): main() return output_dir
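For reference, the distributed branch above assembles a launch command along these lines; this is a sketch with illustrative GPU count, port, and script path:

import sys

n_gpus_to_use, master_port = 2, 29500  # illustrative values
distributed_args = (
    f"-m torch.distributed.run --nproc_per_node={n_gpus_to_use} "
    f"--master_port={master_port} examples/pytorch/translation/run_translation.py"
).split()
cmd = [sys.executable] + distributed_args + ["--model_name_or_path", "sshleifer/student_marian_en_ro_6_1"]
print(" ".join(cmd))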
import re import string import numpy as np import datasets _SCREAMING_SNAKE_CASE = """ Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. """ _SCREAMING_SNAKE_CASE = """ Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 25.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 50.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 75.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results[\"exact_match\"], 1)) 100.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"] >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 33.3 """ _SCREAMING_SNAKE_CASE = """ """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE_ ( datasets.Metric ): def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , 
id='sequence' ), } ) , reference_urls=[] , ) def UpperCAmelCase_ ( self : int , _A : Tuple , _A : Tuple , _A : str=None , _A : Dict=False , _A : Tuple=False , _A : str=False , ) -> Tuple: """simple docstring""" if regexes_to_ignore is not None: for s in regexes_to_ignore: snake_case_ : List[Any] = np.array([re.sub(_A , '' , _A ) for x in predictions] ) snake_case_ : Optional[Any] = np.array([re.sub(_A , '' , _A ) for x in references] ) else: snake_case_ : Dict = np.asarray(_A ) snake_case_ : Tuple = np.asarray(_A ) if ignore_case: snake_case_ : List[str] = np.char.lower(_A ) snake_case_ : Any = np.char.lower(_A ) if ignore_punctuation: snake_case_ : int = string.punctuation.maketrans('' , '' , string.punctuation ) snake_case_ : Tuple = np.char.translate(_A , table=_A ) snake_case_ : str = np.char.translate(_A , table=_A ) if ignore_numbers: snake_case_ : Optional[int] = string.digits.maketrans('' , '' , string.digits ) snake_case_ : str = np.char.translate(_A , table=_A ) snake_case_ : Union[str, Any] = np.char.translate(_A , table=_A ) snake_case_ : int = predictions == references return {"exact_match": np.mean(_A ) * 100}
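The numpy-based normalisation is easiest to trace on a single pair. A minimal sketch of the ignore_case plus ignore_punctuation path, mirroring the calls above:

import string

import numpy as np

predictions = np.asarray(["Agent007!"])
references = np.asarray(["agent007"])
predictions = np.char.lower(predictions)  # ignore_case
references = np.char.lower(references)
table = string.punctuation.maketrans("", "", string.punctuation)
predictions = np.char.translate(predictions, table=table)  # ignore_punctuation
references = np.char.translate(references, table=table)
print(float(np.mean(predictions == references) * 100))  # 100.0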
import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser( description=( """Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""]) parser.add_argument("""--model_name""", default="""roberta-large""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") _SCREAMING_SNAKE_CASE = parser.parse_args() if args.model_type == "roberta": _SCREAMING_SNAKE_CASE = RobertaForMaskedLM.from_pretrained(args.model_name) _SCREAMING_SNAKE_CASE = """roberta""" elif args.model_type == "gpt2": _SCREAMING_SNAKE_CASE = GPTaLMHeadModel.from_pretrained(args.model_name) _SCREAMING_SNAKE_CASE = """transformer""" _SCREAMING_SNAKE_CASE = model.state_dict() _SCREAMING_SNAKE_CASE = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: _SCREAMING_SNAKE_CASE = state_dict[F'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: _SCREAMING_SNAKE_CASE = F'''{prefix}.embeddings.{w}.weight''' _SCREAMING_SNAKE_CASE = state_dict[param_name] for w in ["weight", "bias"]: _SCREAMING_SNAKE_CASE = F'''{prefix}.embeddings.LayerNorm.{w}''' _SCREAMING_SNAKE_CASE = state_dict[param_name] # Transformer Blocks # _SCREAMING_SNAKE_CASE = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: _SCREAMING_SNAKE_CASE = state_dict[ F'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] _SCREAMING_SNAKE_CASE = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: _SCREAMING_SNAKE_CASE = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: _SCREAMING_SNAKE_CASE = state_dict[F'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: _SCREAMING_SNAKE_CASE = state_dict[F'''lm_head.dense.{w}'''] _SCREAMING_SNAKE_CASE = state_dict[F'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: _SCREAMING_SNAKE_CASE = state_dict[F'''{prefix}.ln_f.{w}'''] _SCREAMING_SNAKE_CASE = state_dict["""lm_head.weight"""] print(F'''N layers selected for distillation: {std_idx}''') print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
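Once the script has run, the dumped checkpoint is an ordinary state dict and can be inspected directly (the path below is the script's --dump_checkpoint default):

import torch

compressed_sd = torch.load("serialization_dir/tf_roberta_048131723.pth", map_location="cpu")
print(len(compressed_sd), "tensors transferred")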
from __future__ import annotations

import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module

import numpy as np

from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SCREAMING_SNAKE_CASE_ :
    def __init__( self : List[Any] , _A : Optional[Any] , _A : Dict=13 , _A : Union[str, Any]=30 , _A : Tuple=2 , _A : Union[str, Any]=3 , _A : Optional[int]=True , _A : Optional[Any]=True , _A : str=32 , _A : int=2 , _A : List[str]=4 , _A : List[str]=37 , _A : Tuple="gelu" , _A : Dict=0.1 , _A : Optional[Any]=0.1 , _A : Optional[int]=10 , _A : Optional[int]=0.0_2 , _A : Optional[Any]=3 , _A : str=0.6 , _A : Union[str, Any]=None , ) -> Any:
        """simple docstring"""
        snake_case_ : Optional[int] = parent
        snake_case_ : Tuple = batch_size
        snake_case_ : List[Any] = image_size
        snake_case_ : List[str] = patch_size
        snake_case_ : List[str] = num_channels
        snake_case_ : Optional[Any] = is_training
        snake_case_ : Any = use_labels
        snake_case_ : Tuple = hidden_size
        snake_case_ : Union[str, Any] = num_hidden_layers
        snake_case_ : List[Any] = num_attention_heads
        snake_case_ : Optional[Any] = intermediate_size
        snake_case_ : List[Any] = hidden_act
        snake_case_ : Union[str, Any] = hidden_dropout_prob
        snake_case_ : Any = attention_probs_dropout_prob
        snake_case_ : Tuple = type_sequence_label_size
        snake_case_ : List[str] = initializer_range
        snake_case_ : Optional[Any] = mask_ratio
        snake_case_ : Any = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        snake_case_ : Optional[int] = (image_size // patch_size) ** 2
        snake_case_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case_ : Union[str, Any] = None
        if self.use_labels:
            snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case_ : Union[str, Any] = self.get_config()
        return config, pixel_values, labels

    def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
        """simple docstring"""
        return ViTMAEConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            decoder_hidden_size=self.hidden_size ,
            decoder_num_hidden_layers=self.num_hidden_layers ,
            decoder_num_attention_heads=self.num_attention_heads ,
            decoder_intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            is_decoder=_A ,
            initializer_range=self.initializer_range ,
            mask_ratio=self.mask_ratio ,
        )

    def UpperCAmelCase_ ( self : List[Any] , _A : int , _A : Dict , _A : str ) -> Dict:
        """simple docstring"""
        snake_case_ : Union[str, Any] = TFViTMAEModel(config=_A )
        snake_case_ : str = model(_A , training=_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase_ ( self : Dict , _A : Dict , _A : Any , _A : List[Any] ) -> int:
        """simple docstring"""
        snake_case_ : Any = TFViTMAEForPreTraining(_A )
        snake_case_ : Optional[Any] = model(_A , training=_A )
        # expected sequence length = num_patches
        snake_case_ : List[str] = (self.image_size // self.patch_size) ** 2
        snake_case_ : Optional[Any] = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

        # test greyscale images
        snake_case_ : str = 1
        snake_case_ : Dict = TFViTMAEForPreTraining(_A )
        snake_case_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case_ : List[str] = model(_A , training=_A )
        snake_case_ : Optional[Any] = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        snake_case_ : List[Any] = self.prepare_config_and_inputs()
        ((snake_case_) , (snake_case_) , (snake_case_)) : Any = config_and_inputs
        snake_case_ : Optional[Any] = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ):
    __magic_name__: List[str] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    __magic_name__: str = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    __magic_name__: Dict = False
    __magic_name__: Dict = False
    __magic_name__: List[Any] = False
    __magic_name__: Dict = False

    def UpperCAmelCase_ ( self : Any ) -> List[Any]:
        """simple docstring"""
        snake_case_ : List[Any] = TFViTMAEModelTester(self )
        snake_case_ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )

    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViTMAE does not use inputs_embeds' )
    def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        pass

    def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : List[Any] = model_class(_A )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            snake_case_ : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )

    def UpperCAmelCase_ ( self : List[str] ) -> Dict:
        """simple docstring"""
        snake_case_ , snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : List[str] = model_class(_A )
            snake_case_ : Any = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : Dict = [*signature.parameters.keys()]
            snake_case_ : Dict = ['pixel_values']
            self.assertListEqual(arg_names[:1] , _A )

    def UpperCAmelCase_ ( self : Dict ) -> List[str]:
        """simple docstring"""
        snake_case_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
        """simple docstring"""
        snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*_A )

    def UpperCAmelCase_ ( self : Tuple ) -> Dict:
        """simple docstring"""
        # make the mask reproducible
        np.random.seed(2 )
        snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
        snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            snake_case_ : Optional[Any] = model_class(_A )
            snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A )
            snake_case_ : List[str] = model(_A , noise=_A )
            snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) )
            snake_case_ : str = model(**_A , noise=_A )
            snake_case_ : Union[str, Any] = outputs_dict[0].numpy()
            snake_case_ : Optional[Any] = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )

    def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        # make the mask reproducible
        np.random.seed(2 )
        snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 )
        snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )

        def prepare_numpy_arrays(_A : int ):
            snake_case_ : Any = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(_A ):
                    snake_case_ : str = v.numpy()
                else:
                    snake_case_ : Optional[Any] = np.array(_A )
            return inputs_np_dict

        for model_class in self.all_model_classes:
            snake_case_ : int = model_class(_A )
            snake_case_ : List[Any] = self._prepare_for_class(_A , _A )
            snake_case_ : Any = prepare_numpy_arrays(_A )
            snake_case_ : List[Any] = model(_A , noise=_A )
            snake_case_ : List[Any] = model(**_A , noise=_A )
            self.assert_outputs_same(_A , _A )

    def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]:
        """simple docstring"""
        # make masks reproducible
        np.random.seed(2 )
        snake_case_ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        snake_case_ : Optional[int] = tf.constant(_A )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        snake_case_ : Optional[Any] = tf_noise
        super().check_pt_tf_models(_A , _A , _A )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
        """simple docstring"""
        # make mask reproducible
        np.random.seed(2 )
        snake_case_ , snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : int = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(_A )
            if module_member_name.endswith('MainLayer' )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
            for module_member in (getattr(_A , _A ),)
            if isinstance(_A , _A )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(_A , '_keras_serializable' , _A )
        }
        snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
        snake_case_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        snake_case_ : Optional[int] = tf.convert_to_tensor(_A )
        inputs_dict.update({'noise': noise} )

        for main_layer_class in tf_main_layer_classes:
            snake_case_ : Optional[Any] = main_layer_class(_A )
            snake_case_ : List[str] = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            snake_case_ : Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) )
            snake_case_ : int = model(_A )

            with tempfile.TemporaryDirectory() as tmpdirname:
                snake_case_ : List[Any] = os.path.join(_A , 'keras_model.h5' )
                model.save(_A )
                snake_case_ : str = tf.keras.models.load_model(
                    _A , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(_A , tf.keras.Model )
                snake_case_ : List[str] = model(_A )
                self.assert_outputs_same(_A , _A )

    @slow
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        # make mask reproducible
        np.random.seed(2 )
        snake_case_ , snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : int = int((config.image_size // config.patch_size) ** 2 )
        snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            snake_case_ : Optional[Any] = model_class(_A )
            snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A )
            snake_case_ : int = model(_A , noise=_A )
            if model_class.__name__ == "TFViTMAEModel":
                snake_case_ : Any = outputs.last_hidden_state.numpy()
                snake_case_ : Optional[int] = 0
            else:
                snake_case_ : str = outputs.logits.numpy()
                snake_case_ : Optional[Any] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_A , saved_model=_A )
                snake_case_ : Any = model_class.from_pretrained(_A )
                snake_case_ : Any = model(_A , noise=_A )
                if model_class.__name__ == "TFViTMAEModel":
                    snake_case_ : Dict = after_outputs['last_hidden_state'].numpy()
                    snake_case_ : Dict = 0
                else:
                    snake_case_ : Any = after_outputs['logits'].numpy()
                    snake_case_ : Optional[Any] = 0
                snake_case_ : Any = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(_A , 1E-5 )

    def UpperCAmelCase_ ( self : Any ) -> str:
        """simple docstring"""
        # make mask reproducible
        np.random.seed(2 )
        snake_case_ , snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
        snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            snake_case_ : str = model_class(_A )
            snake_case_ : int = self._prepare_for_class(_A , _A )
            snake_case_ : str = model(_A , noise=_A )
            snake_case_ : Dict = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(_A )
            snake_case_ : Any = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            snake_case_ : str = model_class.from_config(model.config )
            snake_case_ : Union[str, Any] = new_model(_A )  # Build model
            new_model.set_weights(model.get_weights() )
            snake_case_ : List[str] = new_model(_A , noise=_A )
            self.assert_outputs_same(_A , _A )

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        pass

    @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
    def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
        """simple docstring"""
        pass

    @slow
    def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
        """simple docstring"""
        snake_case_ : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(_A )


def SCREAMING_SNAKE_CASE__ ( ):
    snake_case_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    @cached_property
    def UpperCAmelCase_ ( self : str ) -> Dict:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None

    @slow
    def UpperCAmelCase_ ( self : str ) -> Dict:
        """simple docstring"""
        # make random mask reproducible across the PT and TF model
        np.random.seed(2 )

        snake_case_ : List[str] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
        snake_case_ : List[Any] = self.default_image_processor
        snake_case_ : Dict = prepare_img()
        snake_case_ : Optional[Any] = image_processor(images=_A , return_tensors='tf' )

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        snake_case_ : int = ViTMAEConfig()
        snake_case_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        snake_case_ : List[Any] = np.random.uniform(size=(1, num_patches) )

        # forward pass
        snake_case_ : Optional[Any] = model(**_A , noise=_A )

        # verify the logits
        snake_case_ : Optional[int] = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , _A )

        snake_case_ : Any = tf.convert_to_tensor(
            [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , _A , atol=1E-4 )
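As a worked check of the masked sequence-length formula the tester relies on (using only the default values shown in its __init__):

# (30 // 2) ** 2 = 225 patches, plus the [CLS] token = 226 positions;
# keeping 40% of them (mask_ratio=0.6) and rounding up gives ceil(0.4 * 226) = 91.
import math
assert int(math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1))) == 91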
327
1
import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n=2_00_00_00):
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(F'''{solution() = }''')
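A quick sanity check of the 6k ± 1 trial division and the running prime sum (names as restored above from the snippet's own call sites):

from itertools import islice

assert is_prime(97) and not is_prime(1)
assert list(islice(prime_generator(), 5)) == [2, 3, 5, 7, 11]
assert solution(10) == 17  # primes strictly below 10: 2 + 3 + 5 + 7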
327
from __future__ import annotations


def generate_sum_of_subsets_soln(nums, max_sum):
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums,
    max_sum,
    num_index,
    path,
    result,
    remaining_nums_sum,
):
    # Prune branches that already exceed the target or can never reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
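Worked example for the demo values above: tracing the depth-first search in index order, the only subsets of [3, 34, 4, 12, 5, 2] summing to 9 are found as [3, 4, 2] and then [4, 5].

assert generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]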
327
1
from pathlib import Path
from typing import List

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


_SCREAMING_SNAKE_CASE = ["""text""", """image""", """audio"""]


def SCREAMING_SNAKE_CASE__ ( __a ):
    snake_case_ : Optional[Any] = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append('Text input' )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_12, 5_12) )
            )
        elif input_type == "audio":
            inputs.append(torch.ones(30_00 ) )
        elif isinstance(__a , __a ):
            inputs.append(create_inputs(__a ) )
        else:
            raise ValueError(f"""Invalid type requested: {input_type}""" )
    return inputs


def SCREAMING_SNAKE_CASE__ ( __a ):
    snake_case_ : int = []
    for output in outputs:
        if isinstance(__a , (str, AgentText) ):
            output_types.append('text' )
        elif isinstance(__a , (Image.Image, AgentImage) ):
            output_types.append('image' )
        elif isinstance(__a , (torch.Tensor, AgentAudio) ):
            output_types.append('audio' )
        else:
            raise ValueError(f"""Invalid output: {output}""" )
    return output_types


@is_tool_test
class SCREAMING_SNAKE_CASE_ :
    def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
        """simple docstring"""
        self.assertTrue(hasattr(self.tool , 'inputs' ) )
        self.assertTrue(hasattr(self.tool , 'outputs' ) )

        snake_case_ : Any = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , _A ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )

        snake_case_ : Optional[int] = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )

    def UpperCAmelCase_ ( self : Optional[int] ) -> str:
        """simple docstring"""
        snake_case_ : Optional[Any] = create_inputs(self.tool.inputs )
        snake_case_ : int = self.tool(*_A )

        # There is a single output
        if len(self.tool.outputs ) == 1:
            snake_case_ : Tuple = [outputs]

        self.assertListEqual(output_types(_A ) , self.tool.outputs )

    def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
        """simple docstring"""
        self.assertTrue(hasattr(self.tool , 'description' ) )
        self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
        self.assertTrue(self.tool.description.startswith('This is a tool that' ) )

    def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        snake_case_ : Any = create_inputs(self.tool.inputs )
        snake_case_ : Tuple = self.tool(*_A )
        if not isinstance(_A , _A ):
            snake_case_ : List[str] = [outputs]
        self.assertEqual(len(_A ) , len(self.tool.outputs ) )

        for output, output_type in zip(_A , self.tool.outputs ):
            snake_case_ : List[Any] = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(_A , _A ) )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        snake_case_ : int = create_inputs(self.tool.inputs )
        snake_case_ : Any = []
        for _input, input_type in zip(_A , self.tool.inputs ):
            if isinstance(_A , _A ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )

        # Should not raise an error
        snake_case_ : List[Any] = self.tool(*_A )
        if not isinstance(_A , _A ):
            snake_case_ : Union[str, Any] = [outputs]
        self.assertEqual(len(_A ) , len(self.tool.outputs ) )
327
def SCREAMING_SNAKE_CASE__ ( density , bulk_modulus ):
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
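This is the Newton-Laplace speed-of-sound formula, c = sqrt(K/rho). A quick numeric check with approximate textbook values for water (not from the snippet: bulk modulus ~2.15 GPa, density ~1000 kg/m^3), which lands near the measured ~1480 m/s:

# Approximate room-temperature values for water (assumed for illustration):
speed = SCREAMING_SNAKE_CASE__(1_000, 2.15e9)
print(f"{speed:.0f} m/s")  # ~1466 m/s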
327
1
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset

from transformers import AutoTokenizer, HfArgumentParser


_SCREAMING_SNAKE_CASE = re.compile(R"""\s+""")


def SCREAMING_SNAKE_CASE__ ( __a ):
    return {"hash": hashlib.mda(re.sub(__a , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}


def SCREAMING_SNAKE_CASE__ ( __a ):
    snake_case_ : Any = [len(__a ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(__a ), "line_max": max(__a )}


def SCREAMING_SNAKE_CASE__ ( __a ):
    snake_case_ : List[str] = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}


def SCREAMING_SNAKE_CASE__ ( __a , __a ):
    if example["hash"] in uniques:
        uniques.remove(example['hash'] )
        return True
    else:
        return False


def SCREAMING_SNAKE_CASE__ ( __a , __a=5 ):
    snake_case_ : Dict = ['auto-generated', 'autogenerated', 'automatically generated']
    snake_case_ : List[str] = example['content'].splitlines()
    for _, line in zip(range(__a ) , __a ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def SCREAMING_SNAKE_CASE__ ( __a , __a=5 , __a=0.05 ):
    snake_case_ : Optional[Any] = ['unit tests', 'test file', 'configuration file']
    snake_case_ : Dict = example['content'].splitlines()
    snake_case_ : List[Any] = 0
    snake_case_ : Dict = 0
    # first test
    for _, line in zip(range(__a ) , __a ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    snake_case_ : Union[str, Any] = example['content'].count('\n' )
    snake_case_ : List[str] = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count('config' )
        count_test += line.lower().count('test' )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def SCREAMING_SNAKE_CASE__ ( __a ):
    snake_case_ : Optional[int] = ['def ', 'class ', 'for ', 'while ']
    snake_case_ : str = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def SCREAMING_SNAKE_CASE__ ( __a , __a=4 ):
    snake_case_ : Optional[int] = example['content'].splitlines()
    snake_case_ : int = 0
    for line in lines:
        counter += line.lower().count('=' )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def SCREAMING_SNAKE_CASE__ ( __a ):
    snake_case_ : Optional[int] = tokenizer(example['content'] , truncation=__a )['input_ids']
    snake_case_ : Optional[Any] = len(example['content'] ) / len(__a )
    return {"ratio": ratio}


def SCREAMING_SNAKE_CASE__ ( __a ):
    snake_case_ : List[str] = {}
    results.update(get_hash(__a ) )
    results.update(line_stats(__a ) )
    results.update(alpha_stats(__a ) )
    results.update(char_token_ratio(__a ) )
    results.update(is_autogenerated(__a ) )
    results.update(is_config_or_test(__a ) )
    results.update(has_no_keywords(__a ) )
    results.update(has_few_assignments(__a ) )
    return results


def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ):
    if not check_uniques(__a , __a ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def SCREAMING_SNAKE_CASE__ ( __a ):
    with open(__a , 'rb' ) as f_in:
        with gzip.open(str(__a ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(__a , __a )
    os.unlink(__a )


# Settings
_SCREAMING_SNAKE_CASE = HfArgumentParser(PreprocessingArguments)
_SCREAMING_SNAKE_CASE = parser.parse_args()
if args.num_workers is None:
    _SCREAMING_SNAKE_CASE = multiprocessing.cpu_count()
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = load_dataset(args.dataset_name, split="""train""")
print(F'''Time to load dataset: {time.time()-t_start:.2f}''')

# Run preprocessing
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = ds.map(preprocess, num_proc=args.num_workers)
print(F'''Time to preprocess dataset: {time.time()-t_start:.2f}''')

# Deduplicate hashes
_SCREAMING_SNAKE_CASE = set(ds.unique("""hash"""))
_SCREAMING_SNAKE_CASE = len(uniques) / len(ds)
print(F'''Fraction of duplicates: {1-frac:.2%}''')

# Deduplicate data and apply heuristics
_SCREAMING_SNAKE_CASE = time.time()
_SCREAMING_SNAKE_CASE = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(F'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(F'''Size of filtered dataset: {len(ds_filter)}''')

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    _SCREAMING_SNAKE_CASE = time.time()
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(F'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(F'''Size of deduplicate dataset: {len(ds_filter)}''')

# Save data in batches of samples_per_file
_SCREAMING_SNAKE_CASE = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / """duplicate_clusters.json""", """w""") as f:
        json.dump(duplicate_clusters, f)

_SCREAMING_SNAKE_CASE = output_dir / """data"""
data_dir.mkdir(exist_ok=True)

_SCREAMING_SNAKE_CASE = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    _SCREAMING_SNAKE_CASE = str(data_dir / F'''file-{file_number+1:012}.json''')
    _SCREAMING_SNAKE_CASE = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(F'''Time to save dataset: {time.time()-t_start:.2f}''')
327
from math import pi


def arc_length(angle, radius):
    return 2 * pi * radius * (angle / 3_60)


if __name__ == "__main__":
    print(arc_length(90, 10))
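Worked check of the demo call above: a 90-degree arc is a quarter of the circumference, so the result is 2*pi*10/4 = 5*pi, roughly 15.708.

from math import isclose, pi

assert isclose(arc_length(90, 10), 5 * pi)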
327
1
# limitations under the License.

from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
    def __init__( self : Optional[Any] , _A : Optional[Any] , _A : List[Any] ) -> Tuple:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=_A , scheduler=_A )

    @torch.no_grad()
    def __call__( self : Union[str, Any] , _A : int = 1 , _A : Optional[torch.Generator] = None , _A : int = 50 , _A : Optional[str] = "pil" , _A : bool = True , **_A : Tuple , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        snake_case_ : Any = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) ,
            generator=_A ,
        )
        snake_case_ : str = image.to(self.device )

        # set step values
        self.scheduler.set_timesteps(_A )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            snake_case_ : str = self.unet(_A , _A ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            snake_case_ : Optional[int] = self.scheduler.step(_A , _A , _A ).prev_sample

        snake_case_ : List[str] = (image / 2 + 0.5).clamp(0 , 1 )
        snake_case_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            snake_case_ : int = self.numpy_to_pil(_A )

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=_A ), "This is a local test"
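Since the pipeline just registers a UNet plus a scheduler and loops scheduler.step over the timesteps, a usage sketch might look like the following. The checkpoint choice is an assumption, and the positional argument order merely mirrors the __call__ signature above, whose real parameter names are obscured by the obfuscation:

from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")  # hypothetical checkpoint choice
scheduler = DDPMScheduler()
pipe = SCREAMING_SNAKE_CASE_(unet, scheduler)
# (batch_size, generator, num_inference_steps, output_type) per the signature order
output, marker = pipe(1, None, 50, "pil")  # note the extra "This is a local test" marker in the return value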
327
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
    __magic_name__: Optional[Any] = ["pixel_values"]

    def __init__( self : str , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PIL.Image.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : str , ) -> None:
        """simple docstring"""
        super().__init__(**_A )
        snake_case_ : Dict = size if size is not None else {'height': 256, 'width': 256}
        snake_case_ : Tuple = get_size_dict(_A )
        snake_case_ : str = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        snake_case_ : int = get_size_dict(_A , param_name='crop_size' )

        snake_case_ : Union[str, Any] = do_resize
        snake_case_ : str = size
        snake_case_ : List[str] = resample
        snake_case_ : List[Any] = do_center_crop
        snake_case_ : Dict = crop_size
        snake_case_ : Tuple = do_rescale
        snake_case_ : Optional[Any] = rescale_factor
        snake_case_ : Any = do_normalize
        snake_case_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCAmelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PIL.Image.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray:
        """simple docstring"""
        snake_case_ : Tuple = get_size_dict(_A )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return resize(
            _A , size=(size['height'], size['width']) , resample=_A , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray:
        """simple docstring"""
        snake_case_ : Optional[int] = get_size_dict(_A )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : Dict , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ) -> str:
        """simple docstring"""
        return rescale(_A , scale=_A , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray:
        """simple docstring"""
        return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : List[str] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : Union[str, Any]=None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : int , ) -> PIL.Image.Image:
        """simple docstring"""
        snake_case_ : int = do_resize if do_resize is not None else self.do_resize
        snake_case_ : str = resample if resample is not None else self.resample
        snake_case_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
        snake_case_ : Dict = image_std if image_std is not None else self.image_std

        snake_case_ : int = size if size is not None else self.size
        snake_case_ : Optional[int] = get_size_dict(_A )
        snake_case_ : int = crop_size if crop_size is not None else self.crop_size
        snake_case_ : Any = get_size_dict(_A , param_name='crop_size' )

        snake_case_ : Optional[Any] = make_list_of_images(_A )

        if not valid_images(_A ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )

        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )

        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )

        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )

        # All transformations expect numpy arrays.
        snake_case_ : Optional[Any] = [to_numpy_array(_A ) for image in images]

        if do_resize:
            snake_case_ : Dict = [self.resize(image=_A , size=_A , resample=_A ) for image in images]

        if do_center_crop:
            snake_case_ : Optional[Any] = [self.center_crop(image=_A , size=_A ) for image in images]

        if do_rescale:
            snake_case_ : Optional[int] = [self.rescale(image=_A , scale=_A ) for image in images]

        if do_normalize:
            snake_case_ : str = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]

        snake_case_ : Dict = [to_channel_dimension_format(_A , _A ) for image in images]

        snake_case_ : Tuple = {'pixel_values': images}
        return BatchFeature(data=_A , tensor_type=_A )
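A minimal usage sketch of the resize-then-crop pipeline this processor encodes (the 256-pixel resize and 224-pixel crop defaults come from the __init__ above; the call path assumes the obfuscated method names resolve to the usual BaseImageProcessor preprocess API):

import numpy as np
from PIL import Image

processor = SCREAMING_SNAKE_CASE_()  # defaults: resize to 256x256, center-crop to 224x224
img = Image.fromarray(np.zeros((300, 300, 3), dtype=np.uint8))
batch = processor(images=img, return_tensors="np")
print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224)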
327
1
def multiplicative_persistence(num):
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values' )

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num):
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values' )
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values' )

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
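Worked examples (using the names restored above from the functions' own error messages): 39 -> 27 -> 14 -> 4 takes three multiplicative steps, and 199 -> 19 -> 10 -> 1 takes three additive steps.

assert multiplicative_persistence(39) == 3
assert additive_persistence(199) == 3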
327
import sys

N = (
    """73167176531330624919225119674426574742355349194934"""
    """96983520312774506326239578318016984801869478851843"""
    """85861560789112949495459501737958331952853208805511"""
    """12540698747158523863050715693290963295227443043557"""
    """66896648950445244523161731856403098711121722383113"""
    """62229893423380308135336276614282806444486645238749"""
    """30358907296290491560440772390713810515859307960866"""
    """70172427121883998797908792274921901699720888093776"""
    """65727333001053367881220235421809751254540594752243"""
    """52584907711670556013604839586446706324415722155397"""
    """53697817977846174064955149290862569321978468622482"""
    """83972241375657056057490261407972968652414535100474"""
    """82166370484403199890008895243450658541227588666881"""
    """16427171479924442928230863465674813919123162824586"""
    """17866458359124566529476545682848912883142607690042"""
    """24219022671055626321111109370544217506941658960408"""
    """07198403850962455444362981230987879927244284909188"""
    """84580156166097919133875499200524063689912560717606"""
    """05886116467109405077541002256983155200055935729725"""
    """71636269561882670428252483600823257530420752963450"""
)


def solution(n=N):
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(F'''{solution() = }''')
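Quick check of the 13-digit sliding window (names as restored above): with thirteen identical nines there is exactly one window and the product is 9**13.

assert solution("9" * 13) == 9**13  # 2,541,865,828,329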
327
1
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_SCREAMING_SNAKE_CASE = """\
@article{hendrycksmath2021,
  title={Measuring Mathematical Problem Solving With the MATH Dataset},
  author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
  journal={arXiv preprint arXiv:2103.03874},
  year={2021}
}
"""

_SCREAMING_SNAKE_CASE = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""

_SCREAMING_SNAKE_CASE = R"""
Calculates accuracy after canonicalizing inputs.
Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
    >>> metric = datasets.load_metric(\"competition_math\")
    >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
    def UpperCAmelCase_ ( self : int ) -> List[Any]:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string' ),
                    'references': datasets.Value('string' ),
                }
            ) ,
            homepage='https://github.com/hendrycks/math' ,
            codebase_urls=['https://github.com/hendrycks/math'] ,
        )

    def UpperCAmelCase_ ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> Optional[int]:
        """simple docstring"""
        snake_case_ : Dict = 0.0
        for i, j in zip(_A , _A ):
            n_correct += 1.0 if math_equivalence.is_equiv(_A , _A ) else 0.0
        snake_case_ : Any = n_correct / len(_A )
        return {
            "accuracy": accuracy,
        }
327
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeqaSeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    SeqaSeqDataCollator,
    SeqaSeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)


@dataclass
class SCREAMING_SNAKE_CASE_ :
    __magic_name__: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    __magic_name__: Optional[str] = field(
        default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    __magic_name__: Optional[str] = field(
        default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    __magic_name__: Optional[str] = field(
        default=snake_case_ ,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,
    )
    __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the encoder."} )
    __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the embeddings."} )


@dataclass
class SCREAMING_SNAKE_CASE_ :
    __magic_name__: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    __magic_name__: Optional[str] = field(
        default="summarization" ,
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} ,
    )
    __magic_name__: Optional[int] = field(
        default=1024 ,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,
    )
    __magic_name__: Optional[int] = field(
        default=128 ,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,
    )
    __magic_name__: Optional[int] = field(
        default=142 ,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } ,
    )
    __magic_name__: Optional[int] = field(
        default=142 ,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,
    )
    __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
    __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
    __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
    __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Source language id for translation."} )
    __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Target language id for translation."} )
    __magic_name__: Optional[int] = field(default=snake_case_ , metadata={"help": "# num_beams to use for evaluation."} )
    __magic_name__: bool = field(
        default=snake_case_ ,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} ,
    )


def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ):
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(__a , os.path.join(__a , f"""{split}_results.json""" ) )


def SCREAMING_SNAKE_CASE__ ( ):
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    snake_case_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )

    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        snake_case_ , snake_case_ , snake_case_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        snake_case_ , snake_case_ , snake_case_ : List[str] = parser.parse_args_into_dataclasses()

    check_output_dir(__a )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,
        datefmt='%m/%d/%Y %H:%M:%S' ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,
    )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,
        training_args.fpaa ,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' , __a )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    snake_case_ : Tuple = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
    )

    snake_case_ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(__a , __a , __a ):
            assert hasattr(__a , __a ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(__a , __a , getattr(__a , __a ) )

    snake_case_ : Tuple = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
    )
    snake_case_ : Any = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path ,
        from_tf='.ckpt' in model_args.model_name_or_path ,
        config=__a ,
        cache_dir=model_args.cache_dir ,
    )

    # use task specific params
    use_task_specific_params(__a , data_args.task )

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        snake_case_ : Any = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(__a , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(__a , __a ):
            snake_case_ : int = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            snake_case_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )

    if model_args.freeze_embeds:
        freeze_embeds(__a )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )

    snake_case_ : List[Any] = SeqaSeqDataset

    # Get datasets
    snake_case_ : List[Any] = (
        dataset_class(
            __a ,
            type_path='train' ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_train ,
            max_target_length=data_args.max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or '' ,
        )
        if training_args.do_train
        else None
    )
    snake_case_ : List[str] = (
        dataset_class(
            __a ,
            type_path='val' ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_val ,
            max_target_length=data_args.val_max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or '' ,
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    snake_case_ : List[Any] = (
        dataset_class(
            __a ,
            type_path='test' ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_test ,
            max_target_length=data_args.test_max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or '' ,
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    snake_case_ : Any = (
        build_compute_metrics_fn(data_args.task , __a ) if training_args.predict_with_generate else None
    )
    snake_case_ : List[str] = SeqaSeqTrainer(
        model=__a ,
        args=__a ,
        data_args=__a ,
        train_dataset=__a ,
        eval_dataset=__a ,
        data_collator=SeqaSeqDataCollator(
            __a , __a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) ,
        compute_metrics=__a ,
        tokenizer=__a ,
    )

    snake_case_ : Optional[int] = {}
    # Training
    if training_args.do_train:
        logger.info('*** Train ***' )

        snake_case_ : Any = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        snake_case_ : Tuple = train_result.metrics
        snake_case_ : List[str] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics('train' , __a , training_args.output_dir )
            all_metrics.update(__a )

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )

        snake_case_ : List[Any] = trainer.evaluate(metric_key_prefix='val' )
        snake_case_ : str = data_args.n_val
        snake_case_ : Union[str, Any] = round(metrics['val_loss'] , 4 )

        if trainer.is_world_process_zero():
            handle_metrics('val' , __a , training_args.output_dir )
            all_metrics.update(__a )

    if training_args.do_predict:
        logger.info('*** Predict ***' )

        snake_case_ : Dict = trainer.predict(test_dataset=__a , metric_key_prefix='test' )
        snake_case_ : Union[str, Any] = test_output.metrics
        snake_case_ : int = data_args.n_test

        if trainer.is_world_process_zero():
            snake_case_ : List[str] = round(metrics['test_loss'] , 4 )
            handle_metrics('test' , __a , training_args.output_dir )
            all_metrics.update(__a )

            if training_args.predict_with_generate:
                snake_case_ : Any = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
                snake_case_ : Any = lmap(str.strip , __a )
                write_txt_file(__a , os.path.join(training_args.output_dir , 'test_generations.txt' ) )

    if trainer.is_world_process_zero():
        save_json(__a , os.path.join(training_args.output_dir , 'all_results.json' ) )

    return all_metrics


def SCREAMING_SNAKE_CASE__ ( __a ):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
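Because the parser branch above accepts a single `.json` path via parse_json_file, one way to drive the script is with a JSON args file. A sketch (the script filename and the placeholder values are assumptions; the keys mirror attribute names actually referenced in main(), such as model_name_or_path, data_dir, and output_dir):

import json

args = {
    "model_name_or_path": "sshleifer/tiny-mbart",  # placeholder checkpoint
    "data_dir": "path/to/data",
    "output_dir": "output",
    "do_train": True,
}
with open("args.json", "w") as f:
    json.dump(args, f)
# then: python finetune_trainer.py args.json  (hypothetical script name)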
327
1
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def SCREAMING_SNAKE_CASE__ ( __a , __a ):
    snake_case_ : Tuple = args.log_outputs
    snake_case_ : Optional[int] = '_'.join(args.dataset.split('/' ) + [args.config, args.split] )

    # load metric
    snake_case_ : str = load_metric('wer' )
    snake_case_ : Optional[int] = load_metric('cer' )

    # compute metrics
    snake_case_ : Union[str, Any] = wer.compute(references=result['target'] , predictions=result['prediction'] )
    snake_case_ : Any = cer.compute(references=result['target'] , predictions=result['prediction'] )

    # print & log results
    snake_case_ : Dict = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(__a )

    with open(f"""{dataset_id}_eval_results.txt""" , 'w' ) as f:
        f.write(__a )

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        snake_case_ : List[Any] = f"""log_{dataset_id}_predictions.txt"""
        snake_case_ : Union[str, Any] = f"""log_{dataset_id}_targets.txt"""

        with open(__a , 'w' ) as p, open(__a , 'w' ) as t:
            # mapping function to write output
            def write_to_file(__a , __a ):
                p.write(f"""{i}""" + '\n' )
                p.write(batch['prediction'] + '\n' )
                t.write(f"""{i}""" + '\n' )
                t.write(batch['target'] + '\n' )

            result.map(__a , with_indices=__a )


def SCREAMING_SNAKE_CASE__ ( __a ):
    snake_case_ : Dict = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    snake_case_ : List[Any] = re.sub(__a , '' , text.lower() )

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    snake_case_ : Union[str, Any] = ['\n\n', '\n', '  ', ' ']
    for t in token_sequences_to_ignore:
        snake_case_ : Any = ' '.join(text.split(__a ) )

    return text


def SCREAMING_SNAKE_CASE__ ( __a ):
    # load dataset
    snake_case_ : Union[str, Any] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__a )

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    snake_case_ : Optional[int] = AutoFeatureExtractor.from_pretrained(args.model_id )
    snake_case_ : int = feature_extractor.sampling_rate

    # resample audio
    snake_case_ : Any = dataset.cast_column('audio' , Audio(sampling_rate=__a ) )

    # load eval pipeline
    if args.device is None:
        snake_case_ : List[Any] = 0 if torch.cuda.is_available() else -1
    snake_case_ : Union[str, Any] = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )

    # map function to decode audio
    def map_to_pred(__a ):
        snake_case_ : str = asr(
            batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )

        snake_case_ : Optional[int] = prediction['text']
        snake_case_ : Optional[Any] = normalize_text(batch['sentence'] )
        return batch

    # run inference on all examples
    snake_case_ : Any = dataset.map(__a , remove_columns=dataset.column_names )

    # compute and log_results
    # do not change function below
    log_results(__a , __a )


if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()

    parser.add_argument(
        """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
    )
    parser.add_argument(
        """--dataset""",
        type=str,
        required=True,
        help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
    )
    parser.add_argument(
        """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
    )
    parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
    parser.add_argument(
        """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
    )
    parser.add_argument(
        """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
    )
    parser.add_argument(
        """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
    )
    parser.add_argument(
        """--device""",
        type=int,
        default=None,
        help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
    )
    _SCREAMING_SNAKE_CASE = parser.parse_args()

    main(args)
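A quick check of the normalization step (assuming the second obfuscated def above resolves to normalize_text, as its call site in map_to_pred indicates): lower-casing, punctuation stripping, and whitespace collapsing give

# "Hello,  WORLD!" -> lower-case -> strip "," and "!" -> collapse the double space
assert normalize_text("Hello,  WORLD!") == "hello world"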
327
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_SCREAMING_SNAKE_CASE = {
    """configuration_poolformer""": [
        """POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """PoolFormerConfig""",
        """PoolFormerOnnxConfig""",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = ["""PoolFormerFeatureExtractor"""]
    _SCREAMING_SNAKE_CASE = ["""PoolFormerImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = [
        """POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PoolFormerForImageClassification""",
        """PoolFormerModel""",
        """PoolFormerPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
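The _import_structure dict plus _LazyModule defers the heavy torch/vision imports until an attribute is first touched. A sketch of the observable behavior (the dotted module path is an assumption based on the relative imports above):

import transformers.models.poolformer as poolformer  # cheap: nothing heavy imported yet

config_cls = poolformer.PoolFormerConfig  # first access triggers the real configuration_poolformer import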
327
1
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

_SCREAMING_SNAKE_CASE = {
    """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
    """google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
    """google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
    __magic_name__: int = "big_bird"

    def __init__( self : int , _A : Any=50358 , _A : List[str]=768 , _A : List[Any]=12 , _A : Optional[Any]=12 , _A : Union[str, Any]=3072 , _A : Tuple="gelu_new" , _A : Tuple=0.1 , _A : Union[str, Any]=0.1 , _A : str=4096 , _A : Optional[int]=2 , _A : Any=0.0_2 , _A : Tuple=1E-12 , _A : Tuple=True , _A : List[str]=0 , _A : int=1 , _A : Tuple=2 , _A : List[Any]=66 , _A : Optional[Any]="block_sparse" , _A : List[str]=True , _A : Dict=False , _A : Optional[Any]=64 , _A : Any=3 , _A : Optional[Any]=None , **_A : List[str] , ) -> Any:
        """simple docstring"""
        super().__init__(
            pad_token_id=_A ,
            bos_token_id=_A ,
            eos_token_id=_A ,
            sep_token_id=_A ,
            **_A ,
        )

        snake_case_ : Optional[Any] = vocab_size
        snake_case_ : List[str] = max_position_embeddings
        snake_case_ : Union[str, Any] = hidden_size
        snake_case_ : Optional[int] = num_hidden_layers
        snake_case_ : int = num_attention_heads
        snake_case_ : str = intermediate_size
        snake_case_ : Dict = hidden_act
        snake_case_ : Optional[int] = hidden_dropout_prob
        snake_case_ : Union[str, Any] = attention_probs_dropout_prob
        snake_case_ : str = initializer_range
        snake_case_ : Any = type_vocab_size
        snake_case_ : Tuple = layer_norm_eps
        snake_case_ : int = use_cache
        snake_case_ : Optional[int] = rescale_embeddings
        snake_case_ : int = attention_type
        snake_case_ : Union[str, Any] = use_bias
        snake_case_ : List[str] = block_size
        snake_case_ : Optional[Any] = num_random_blocks
        snake_case_ : Any = classifier_dropout


class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
    @property
    def UpperCAmelCase_ ( self : int ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            snake_case_ : str = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            snake_case_ : List[Any] = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
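A hedged instantiation sketch: the obfuscated `_A` parameters hide the real keyword names, so the keywords below assume they mirror the attribute names assigned in the constructor body, and model_type is taken from the "big_bird" class attribute above:

config = SCREAMING_SNAKE_CASE_(attention_type="block_sparse", block_size=64, num_random_blocks=3)
print(config.model_type)  # expected "big_bird"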
327
import tempfile
import unittest

from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
        """simple docstring"""
        snake_case_ : Any = 'hf-internal-testing/tiny-random-t5'
        snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(_A )
        snake_case_ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_A )

        snake_case_ : List[Any] = tokenizer('This is me' , return_tensors='pt' )

        snake_case_ : Any = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )

        snake_case_ : Optional[Any] = model.generate(**_A )

        snake_case_ : int = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_A )

            snake_case_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_A )

            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )

            snake_case_ : Optional[Any] = model_reloaded.generate(**_A )
            self.assertTrue(torch.allclose(_A , _A ) )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
        """simple docstring"""
        snake_case_ : Any = 'hf-internal-testing/tiny-random-t5'
        snake_case_ : int = AutoModelForSeqaSeqLM.from_pretrained(_A )

        snake_case_ : Dict = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(_A ):
                model.save_pretrained(_A )

            snake_case_ : Union[str, Any] = model.reverse_bettertransformer()
            model.save_pretrained(_A )
from scipy.stats import pearsonr import datasets _SCREAMING_SNAKE_CASE = """ Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. """ _SCREAMING_SNAKE_CASE = """ Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results['pearsonr'], 2)) -0.74 Example 2-The same as Example 1, but also returning the `p-value`. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) ['p-value', 'pearsonr'] >>> print(round(results['pearsonr'], 2)) -0.74 >>> print(round(results['p-value'], 2)) 0.15 """ _SCREAMING_SNAKE_CASE = """ @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H.
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE_ ( datasets.Metric ): def UpperCAmelCase_ ( self : Dict ) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , ) def UpperCAmelCase_ ( self : Union[str, Any] , _A : List[str] , _A : Dict , _A : Dict=False ) -> List[Any]: """simple docstring""" if return_pvalue: snake_case_ : List[str] = pearsonr(_A , _A ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(_A , _A )[0] )}
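# Worked example (illustrative): the same statistic computed directly with
# scipy, using the values from the docstring above. scipy.stats.pearsonr
# returns the correlation coefficient and the two-sided p-value.
from scipy.stats import pearsonr

r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(r, 2))  # -0.74 (strong negative linear relationship)
print(round(p, 2))  # 0.15 (consistent with the no-correlation null at n=5)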
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def __init__( self : Union[str, Any] , _A : Any , _A : Dict ) -> Union[str, Any]: """simple docstring""" snake_case_ : str = params snake_case_ : int = np.array(_A ) snake_case_ : Optional[int] = np.array([len(_A ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Tuple , _A : Optional[int] ) -> str: """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self : List[str] ) -> str: """simple docstring""" return len(self.lengths ) def UpperCAmelCase_ ( self : Dict ) -> str: """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: """simple docstring""" snake_case_ : Dict = self.params.max_model_input_size snake_case_ : Tuple = self.lengths > max_len logger.info(F"""Splitting {sum(_A )} too long sequences.""" ) def divide_chunks(_A : Union[str, Any] , _A : Dict ): return [l[i : i + n] for i in range(0 , len(_A ) , _A )] snake_case_ : Dict = [] snake_case_ : Union[str, Any] = [] if self.params.mlm: snake_case_ ,snake_case_ : Optional[int] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: snake_case_ ,snake_case_ : Any = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: snake_case_ : List[Any] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: snake_case_ : Optional[int] = np.insert(_A , 0 , _A ) if sub_s[-1] != sep_id: snake_case_ : Optional[Any] = np.insert(_A , len(_A ) , _A ) assert len(_A ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(_A ) new_tok_ids.extend(_A ) new_lengths.extend([len(_A ) for l in sub_seqs] ) snake_case_ : Tuple = np.array(_A ) snake_case_ : int = np.array(_A ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: """simple docstring""" snake_case_ : Tuple = len(self ) snake_case_ : int = self.lengths > 11 snake_case_ : Dict = self.token_ids[indices] snake_case_ : int = self.lengths[indices] snake_case_ : List[Any] = len(self ) logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: snake_case_ : Optional[Any] = self.params.special_tok_ids['unk_token'] snake_case_ : Dict = len(self ) snake_case_ : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) snake_case_ : Any = (unk_occs / self.lengths) < 0.5 snake_case_ : List[Any] = self.token_ids[indices] snake_case_ : int = self.lengths[indices] snake_case_ : Tuple = len(self ) logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" if not self.params.is_master: return logger.info(F"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # 
logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCAmelCase_ ( self : Optional[int] , _A : Union[str, Any] ) -> List[Any]: """simple docstring""" snake_case_ : Any = [t[0] for t in batch] snake_case_ : int = [t[1] for t in batch] assert len(_A ) == len(_A ) # Max for paddings snake_case_ : str = max(_A ) # Pad token ids if self.params.mlm: snake_case_ : int = self.params.special_tok_ids['pad_token'] else: snake_case_ : Dict = self.params.special_tok_ids['unk_token'] snake_case_ : Dict = [list(t.astype(_A ) ) + [pad_idx] * (max_seq_len_ - len(_A )) for t in token_ids] assert len(tk_ ) == len(_A ) assert all(len(_A ) == max_seq_len_ for t in tk_ ) snake_case_ : Any = torch.tensor(tk_ ) # (bs, max_seq_len_) snake_case_ : Optional[Any] = torch.tensor(_A ) # (bs) return tk_t, lg_t
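# Standalone sketch (illustrative) of the splitting strategy implemented above:
# sequences longer than max_len are cut into chunks of max_len - 2 tokens, and
# each chunk is re-wrapped with the cls/sep (or bos/eos) special ids. Unlike the
# method above, this simplified version strips the existing specials up front
# instead of checking each chunk's endpoints.
import numpy as np

def split_long_sequence(seq, max_len, cls_id, sep_id):
    body = seq[1:-1]  # drop the original cls/sep before chunking
    chunks = []
    for i in range(0, len(body), max_len - 2):
        sub = np.concatenate(([cls_id], body[i : i + max_len - 2], [sep_id]))
        assert len(sub) <= max_len
        chunks.append(sub)
    return chunks

print([c.tolist() for c in split_long_sequence(np.array([0, 5, 6, 7, 8, 9, 1]), 5, 0, 1)])
# [[0, 5, 6, 7, 1], [0, 8, 9, 1]]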
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} _SCREAMING_SNAKE_CASE = { """vocab_file""": { """facebook/mbart-large-en-ro""": ( """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model""" ), """facebook/mbart-large-cc25""": ( """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""", """facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""", }, } _SCREAMING_SNAKE_CASE = { """facebook/mbart-large-en-ro""": 10_24, """facebook/mbart-large-cc25""": 10_24, } # fmt: off _SCREAMING_SNAKE_CASE = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""] class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Dict = VOCAB_FILES_NAMES __magic_name__: Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__: List[str] = PRETRAINED_VOCAB_FILES_MAP __magic_name__: int = ["input_ids", "attention_mask"] __magic_name__: Any = MBartTokenizer __magic_name__: List[int] = [] __magic_name__: List[int] = [] def __init__( self : int , _A : Optional[int]=None , _A : int=None , _A : List[str]="<s>" , _A : str="</s>" , _A : List[str]="</s>" , _A : Optional[int]="<s>" , _A : Any="<unk>" , _A : Any="<pad>" , _A : List[str]="<mask>" , _A : Tuple=None , _A : List[str]=None , _A : Tuple=None , **_A : Optional[Any] , ) -> Dict: """simple docstring""" snake_case_ : Dict = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token super().__init__( vocab_file=_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , **_A , ) snake_case_ : Optional[int] = vocab_file snake_case_ : Dict = False if not self.vocab_file else True snake_case_ : str = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) snake_case_ : Optional[int] = { lang_code: self.convert_tokens_to_ids(_A ) for lang_code in FAIRSEQ_LANGUAGE_CODES } snake_case_ : Tuple = src_lang if src_lang is not None else 'en_XX' snake_case_ : int = self.convert_tokens_to_ids(self._src_lang ) snake_case_ : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCAmelCase_ ( self : Optional[int] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def UpperCAmelCase_ ( self : List[str] , _A : str ) -> None: """simple docstring""" snake_case_ : Tuple = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCAmelCase_ ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase_ ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" snake_case_ : Union[str, Any] = [self.sep_token_id] snake_case_ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase_ ( self : int , _A : Tuple , _A : str , _A : Optional[str] , _A : Optional[str] , **_A : str ) -> Tuple: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) snake_case_ : int = src_lang snake_case_ : List[str] = self(_A , add_special_tokens=_A , return_tensors=_A , **_A ) snake_case_ : List[Any] = self.convert_tokens_to_ids(_A ) snake_case_ : int = tgt_lang_id return inputs def UpperCAmelCase_ ( self : str , _A : List[str] , _A : str = "en_XX" , _A : Optional[List[str]] = None , _A : str = "ro_RO" , **_A : str , ) -> BatchEncoding: """simple docstring""" snake_case_ : Optional[Any] = src_lang snake_case_ : List[Any] = tgt_lang return super().prepare_seqaseq_batch(_A , _A , **_A ) def UpperCAmelCase_ ( self : int ) -> Any: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCAmelCase_ ( self : Dict , _A : Any ) -> None: """simple docstring""" snake_case_ : List[str] = self.convert_tokens_to_ids(_A ) snake_case_ : List[Any] = [] snake_case_ : Tuple = [self.eos_token_id, self.cur_lang_code] snake_case_ : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : List[str] = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCAmelCase_ ( self : Union[str, Any] , _A : str ) -> None: """simple docstring""" snake_case_ : int = self.convert_tokens_to_ids(_A ) snake_case_ : Union[str, Any] = [] snake_case_ : Optional[Any] = [self.eos_token_id, self.cur_lang_code] snake_case_ : Any = 
self.convert_ids_to_tokens(self.prefix_tokens ) snake_case_ : Dict = self.convert_ids_to_tokens(self.suffix_tokens ) snake_case_ : str = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCAmelCase_ ( self : Tuple , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(_A ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" ) return snake_case_ : Any = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ): copyfile(self.vocab_file , _A ) return (out_vocab_file,)
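# Usage sketch (illustrative; the checkpoint name and network access are
# assumed). For MBART the source-language code is appended after </s>, as
# set_src_lang_special_tokens above arranges.
from transformers import MBartTokenizerFast

tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
enc = tok("Hello world")
print(tok.convert_ids_to_tokens(enc["input_ids"])[-2:])  # ['</s>', 'en_XX']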
def SCREAMING_SNAKE_CASE__ ( __a , __a ): while b: snake_case_ ,snake_case_ : Any = b, a % b return a def SCREAMING_SNAKE_CASE__ ( __a , __a ): return a if b == 0 else euclidean_gcd_recursive(__a , a % b ) def SCREAMING_SNAKE_CASE__ ( ): print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" ) print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" ) print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" ) print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" ) print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" ) print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" ) print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" ) print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" ) if __name__ == "__main__": main()
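# Quick sanity check (illustrative), assuming the two helpers keep the names
# used in the demo above: both variants should agree with math.gcd.
import math

for a, b in [(3, 5), (48, 18), (270, 192)]:
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)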
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""", # See all DPT models at https://huggingface.co/models?filter=dpt } class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Optional[int] = "dpt" def __init__( self : Union[str, Any] , _A : Tuple=768 , _A : Tuple=12 , _A : List[str]=12 , _A : Union[str, Any]=3072 , _A : Optional[int]="gelu" , _A : List[str]=0.0 , _A : List[str]=0.0 , _A : Union[str, Any]=0.0_2 , _A : Dict=1E-12 , _A : Optional[int]=384 , _A : int=16 , _A : Dict=3 , _A : Optional[Any]=False , _A : List[str]=True , _A : Tuple=[2, 5, 8, 11] , _A : str="project" , _A : Any=[4, 2, 1, 0.5] , _A : Union[str, Any]=[96, 192, 384, 768] , _A : Tuple=256 , _A : List[Any]=-1 , _A : Tuple=False , _A : List[str]=True , _A : Tuple=0.4 , _A : List[str]=255 , _A : Dict=0.1 , _A : Any=[1, 1024, 24, 24] , _A : Optional[Any]=[0, 1] , _A : Optional[int]=None , **_A : int , ) -> str: """simple docstring""" super().__init__(**_A ) snake_case_ : Dict = hidden_size snake_case_ : int = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('Initializing the config with a `BiT` backbone.' ) snake_case_ : Optional[Any] = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, } snake_case_ : Dict = BitConfig(**_A ) elif isinstance(_A , _A ): logger.info('Initializing the config with a `BiT` backbone.' ) snake_case_ : str = BitConfig(**_A ) elif isinstance(_A , _A ): snake_case_ : Union[str, Any] = backbone_config else: raise ValueError( F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" ) snake_case_ : Union[str, Any] = backbone_featmap_shape snake_case_ : Any = neck_ignore_stages if readout_type != "project": raise ValueError('Readout type must be \'project\' when using `DPT-hybrid` mode.' 
) else: snake_case_ : List[Any] = None snake_case_ : Optional[Any] = None snake_case_ : Union[str, Any] = [] snake_case_ : int = num_hidden_layers snake_case_ : str = num_attention_heads snake_case_ : Tuple = intermediate_size snake_case_ : Tuple = hidden_act snake_case_ : Dict = hidden_dropout_prob snake_case_ : List[Any] = attention_probs_dropout_prob snake_case_ : Any = initializer_range snake_case_ : str = layer_norm_eps snake_case_ : str = image_size snake_case_ : Optional[Any] = patch_size snake_case_ : Union[str, Any] = num_channels snake_case_ : Any = qkv_bias snake_case_ : Union[str, Any] = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('Readout_type must be one of [\'ignore\', \'add\', \'project\']' ) snake_case_ : Dict = readout_type snake_case_ : Any = reassemble_factors snake_case_ : List[str] = neck_hidden_sizes snake_case_ : List[Any] = fusion_hidden_size snake_case_ : Tuple = head_in_index snake_case_ : Any = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) snake_case_ : Dict = use_auxiliary_head snake_case_ : int = auxiliary_loss_weight snake_case_ : Union[str, Any] = semantic_loss_ignore_index snake_case_ : List[Any] = semantic_classifier_dropout def UpperCAmelCase_ ( self : Dict ) -> int: """simple docstring""" snake_case_ : Optional[Any] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: snake_case_ : Any = self.backbone_config.to_dict() snake_case_ : Optional[int] = self.__class__.model_type return output
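# Usage sketch (illustrative; assumes the class above is exposed as
# transformers.DPTConfig). In hybrid mode with no backbone_config given,
# the default BiT dictionary above is used.
from transformers import DPTConfig

cfg = DPTConfig(is_hybrid=True, readout_type="project")
print(cfg.backbone_config.layer_type)  # "bottleneck", from the default BiT dict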
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _SCREAMING_SNAKE_CASE = get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ): os.makedirs(__a , exist_ok=__a ) with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : Dict = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" snake_case_ : Dict = os.path.join(__a , __a ) if accelerator.process_index == 0: logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__a , __a ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Dict = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) snake_case_ : Dict = os.path.join(__a , __a ) logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__a , __a ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : Optional[int] = os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" ) os.makedirs(__a , exist_ok=__a ) logger.info(f"""Saving model to {ckpt_dir}""" ) snake_case_ : int = {'model': state_dict} dist_cp.save_state_dict( state_dict=__a , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , ) logger.info(f"""Model saved to {ckpt_dir}""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__a ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" snake_case_ : Optional[Any] = os.path.join(__a , __a ) logger.info(f"""Loading model from {input_model_file}""" ) snake_case_ : Optional[Any] = torch.load(__a ) logger.info(f"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Optional[Any] = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) snake_case_ : Tuple = os.path.join(__a , __a ) logger.info(f"""Loading model from {input_model_file}""" ) snake_case_ : Optional[int] = torch.load(__a ) logger.info(f"""Model loaded from {input_model_file}""" ) 
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : Tuple = ( os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" ) if f"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading model from {ckpt_dir}""" ) snake_case_ : List[Any] = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=__a , storage_reader=dist_cp.FileSystemReader(__a ) , planner=DefaultLoadPlanner() , ) snake_case_ : Any = state_dict['model'] logger.info(f"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(__a ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ): os.makedirs(__a , exist_ok=__a ) with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : List[str] = FSDP.optim_state_dict(__a , __a ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: snake_case_ : str = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) snake_case_ : Any = os.path.join(__a , __a ) logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(__a , __a ) logger.info(f"""Optimizer state saved in {output_optimizer_file}""" ) else: snake_case_ : Optional[int] = os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(__a , exist_ok=__a ) logger.info(f"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , ) logger.info(f"""Optimizer state saved in {ckpt_dir}""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : Optional[Any] = None # below check should work but currently it isn't working (mostly a PyTorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: snake_case_ : Union[str, Any] = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) snake_case_ : List[Any] = os.path.join(__a , __a ) logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" ) snake_case_ : Optional[int] = torch.load(__a ) logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" ) else: snake_case_ : str = ( os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if f"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading Optimizer from {ckpt_dir}""" ) snake_case_ : Any = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__a ) , ) snake_case_ : Optional[int] = optim_state['optimizer'] logger.info(f"""Optimizer loaded from {ckpt_dir}""" ) snake_case_ : Optional[Any] = FSDP.optim_state_dict_to_load(__a , __a , __a ) optimizer.load_state_dict(__a )
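# Illustrative helper (not part of the original module) that mirrors the
# checkpoint-naming scheme used above for FULL vs LOCAL state dicts; the
# MODEL_NAME value is assumed here.
from typing import Optional

MODEL_NAME = "pytorch_model"  # assumed; the real constant comes from .constants

def sharded_weights_name(model_index: int, rank: Optional[int] = None) -> str:
    stem = MODEL_NAME if model_index == 0 else f"{MODEL_NAME}_{model_index}"
    return f"{stem}.bin" if rank is None else f"{stem}_rank{rank}.bin"

print(sharded_weights_name(0))          # pytorch_model.bin (FULL, model 0)
print(sharded_weights_name(1, rank=3))  # pytorch_model_1_rank3.bin (LOCAL, model 1)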
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the encoder."} ) __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) __magic_name__: Optional[str] = field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) __magic_name__: Optional[int] = field( default=1024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field( default=128 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) __magic_name__: Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."} ) __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Source language id for translation."} ) __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Target language id for translation."} ) __magic_name__: Optional[int] = field(default=snake_case_ , metadata={"help": "# num_beams to use for evaluation."} ) __magic_name__: bool = field( default=snake_case_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__a , os.path.join(__a , f"""{split}_results.json""" ) ) def SCREAMING_SNAKE_CASE__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case_ ,snake_case_ ,snake_case_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case_ ,snake_case_ ,snake_case_ : List[str] = parser.parse_args_into_dataclasses() check_output_dir(__a ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s' , __a ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
snake_case_ : Tuple = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(__a , __a , __a ): assert hasattr(__a , __a ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__a , __a , getattr(__a , __a ) ) snake_case_ : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ : Any = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=__a , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__a , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: snake_case_ : Any = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__a , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__a , __a ): snake_case_ : int = tokenizer.lang_code_to_id[data_args.tgt_lang] else: snake_case_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__a ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) snake_case_ : List[Any] = SeqaSeqDataset # Get datasets snake_case_ : List[Any] = ( dataset_class( __a , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_train else None ) snake_case_ : List[str] = ( dataset_class( __a , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) snake_case_ : List[Any] = ( dataset_class( __a , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_predict else None ) # Initialize our Trainer snake_case_ : Any = ( build_compute_metrics_fn(data_args.task , __a ) if training_args.predict_with_generate else None ) snake_case_ : List[str] = SeqaSeqTrainer( model=__a , args=__a , data_args=__a , train_dataset=__a , eval_dataset=__a , data_collator=SeqaSeqDataCollator( __a , __a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__a , tokenizer=__a , ) snake_case_ : Optional[int] = {} # Training if training_args.do_train: logger.info('*** Train ***' ) snake_case_ : Any = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) snake_case_ : Tuple = train_result.metrics snake_case_ : List[str] = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('train' , __a , training_args.output_dir ) all_metrics.update(__a ) # Need to save 
the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ : List[Any] = trainer.evaluate(metric_key_prefix='val' ) snake_case_ : str = data_args.n_val snake_case_ : Union[str, Any] = round(metrics['val_loss'] , 4 ) if trainer.is_world_process_zero(): handle_metrics('val' , __a , training_args.output_dir ) all_metrics.update(__a ) if training_args.do_predict: logger.info('*** Predict ***' ) snake_case_ : Dict = trainer.predict(test_dataset=__a , metric_key_prefix='test' ) snake_case_ : Union[str, Any] = test_output.metrics snake_case_ : int = data_args.n_test if trainer.is_world_process_zero(): snake_case_ : List[str] = round(metrics['test_loss'] , 4 ) handle_metrics('test' , __a , training_args.output_dir ) all_metrics.update(__a ) if training_args.predict_with_generate: snake_case_ : Any = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) snake_case_ : Any = lmap(str.strip , __a ) write_txt_file(__a , os.path.join(training_args.output_dir , 'test_generations.txt' ) ) if trainer.is_world_process_zero(): save_json(__a , os.path.join(training_args.output_dir , 'all_results.json' ) ) return all_metrics def SCREAMING_SNAKE_CASE__ ( __a ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
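# Minimal sketch (illustrative) of the dataclass-driven CLI parsing this script
# relies on: HfArgumentParser turns dataclass fields into command-line flags.
# The ToyArgs dataclass is made up for the example.
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ToyArgs:
    data_dir: str = field(metadata={"help": "Where the data files live."})
    n_train: int = field(default=-1, metadata={"help": "-1 means use all."})

(toy_args,) = HfArgumentParser(ToyArgs).parse_args_into_dataclasses(["--data_dir", "data/"])
print(toy_args.data_dir, toy_args.n_train)  # data/ -1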
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : int = 32 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , _A : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , _A : bool = True , _A : Tuple=7 , _A : Tuple=30 , _A : int=400 , _A : Tuple=3 , ) -> Optional[int]: """simple docstring""" snake_case_ : str = parent snake_case_ : str = do_resize snake_case_ : str = size if size is not None else {'shortest_edge': 288} snake_case_ : Any = size_divisor snake_case_ : Any = do_rescale snake_case_ : Union[str, Any] = rescale_factor snake_case_ : str = do_normalize snake_case_ : int = do_center_crop snake_case_ : str = image_mean snake_case_ : int = image_std snake_case_ : Any = do_pad snake_case_ : Optional[int] = batch_size snake_case_ : List[str] = num_channels snake_case_ : Any = min_resolution snake_case_ : str = max_resolution def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def UpperCAmelCase_ ( self : Dict , _A : str , _A : Union[str, Any]=False ) -> int: """simple docstring""" if not batched: snake_case_ : Optional[int] = self.size['shortest_edge'] snake_case_ : List[Any] = image_inputs[0] if isinstance(_A , Image.Image ): snake_case_ ,snake_case_ : Optional[Any] = image.size else: snake_case_ ,snake_case_ : str = image.shape[1], image.shape[2] snake_case_ : Dict = size / min(_A , _A ) if h < w: snake_case_ ,snake_case_ : str = size, scale * w else: snake_case_ ,snake_case_ : Tuple = scale * h, size snake_case_ : Dict = int((1333 / 800) * size ) if max(_A , _A ) > max_size: snake_case_ : Union[str, Any] = max_size / max(_A , _A ) snake_case_ : Any = newh * scale snake_case_ : Union[str, Any] = neww * scale snake_case_ ,snake_case_ : Any = int(newh + 0.5 ), int(neww + 0.5 ) snake_case_ ,snake_case_ : int = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: snake_case_ : Optional[int] = [] for image in image_inputs: snake_case_ ,snake_case_ : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case_ : str = max(_A , key=lambda _A : item[0] )[0] snake_case_ : List[str] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ): __magic_name__: List[Any] = BridgeTowerImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: """simple docstring""" snake_case_ : int = BridgeTowerImageProcessingTester(self ) @property def UpperCAmelCase_ ( self : int ) -> Any: """simple 
docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , 'image_mean' ) ) self.assertTrue(hasattr(_A , 'image_std' ) ) self.assertTrue(hasattr(_A , 'do_normalize' ) ) self.assertTrue(hasattr(_A , 'do_resize' ) ) self.assertTrue(hasattr(_A , 'size' ) ) self.assertTrue(hasattr(_A , 'size_divisor' ) ) def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]: """simple docstring""" pass def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : List[str] = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Tuple = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : Any = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : str = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : 
Tuple = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
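# Pure-Python sketch (illustrative) of the expected-size arithmetic in
# get_expected_values above: scale the short side to `size`, cap the long side
# at (1333 / 800) * size, then floor both to a multiple of size_divisor.
def expected_hw(h, w, size=288, size_divisor=32):
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        s = max_size / max(newh, neww)
        newh, neww = newh * s, neww * s
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_hw(400, 640))  # (288, 448) for a 400x640 input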
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def __init__( self : str , _A : Dict , _A : Any=13 , _A : Dict=7 , _A : str=True , _A : List[str]=True , _A : str=False , _A : Any=True , _A : Optional[int]=99 , _A : Tuple=32 , _A : Any=5 , _A : int=4 , _A : Optional[Any]=64 , _A : Tuple="gelu" , _A : List[Any]=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : List[str]=16 , _A : Any=2 , _A : List[Any]=0.0_2 , _A : Tuple=3 , _A : Dict=4 , _A : Optional[Any]=None , _A : Optional[int]=2 , _A : Any=2 , _A : str=2 , _A : List[str]=2 , _A : Optional[Any]=4 , _A : Optional[Any]=1 , ) -> Optional[Any]: """simple docstring""" snake_case_ : int = parent snake_case_ : Optional[Any] = batch_size snake_case_ : List[Any] = seq_length snake_case_ : List[Any] = is_training snake_case_ : Optional[Any] = use_input_mask snake_case_ : Tuple = use_token_type_ids snake_case_ : List[str] = use_labels snake_case_ : List[Any] = vocab_size snake_case_ : Any = hidden_size snake_case_ : Optional[Any] = num_hidden_layers snake_case_ : List[str] = num_attention_heads snake_case_ : Any = intermediate_size snake_case_ : List[str] = hidden_act snake_case_ : List[Any] = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : List[str] = max_position_embeddings snake_case_ : List[str] = type_vocab_size snake_case_ : str = type_sequence_label_size snake_case_ : Any = initializer_range snake_case_ : Optional[int] = num_labels snake_case_ : List[Any] = num_choices snake_case_ : Tuple = scope snake_case_ : Optional[Any] = q_groups snake_case_ : Optional[int] = k_groups snake_case_ : str = v_groups snake_case_ : Optional[int] = post_attention_groups snake_case_ : Any = intermediate_groups snake_case_ : List[str] = output_groups def UpperCAmelCase_ ( self : int ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ : List[str] = None if self.use_input_mask: snake_case_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ : List[Any] = None snake_case_ : Dict = None snake_case_ : Dict = None if self.use_labels: snake_case_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ : Optional[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase_ ( self : Union[str, Any] ) -> str: """simple docstring""" return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , 
hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def UpperCAmelCase_ ( self : List[str] , _A : List[Any] , _A : Any , _A : int , _A : int , _A : List[str] , _A : List[str] ) -> List[Any]: """simple docstring""" snake_case_ : Optional[int] = SqueezeBertModel(config=_A ) model.to(_A ) model.eval() snake_case_ : int = model(_A , _A ) snake_case_ : Dict = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Tuple , _A : Optional[int] , _A : Tuple , _A : Optional[int] ) -> str: """simple docstring""" snake_case_ : Optional[Any] = SqueezeBertForMaskedLM(config=_A ) model.to(_A ) model.eval() snake_case_ : Optional[Any] = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase_ ( self : Union[str, Any] , _A : Any , _A : Dict , _A : Optional[int] , _A : Any , _A : Optional[Any] , _A : Any ) -> Optional[Any]: """simple docstring""" snake_case_ : Tuple = SqueezeBertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() snake_case_ : List[Any] = model( _A , attention_mask=_A , start_positions=_A , end_positions=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase_ ( self : Union[str, Any] , _A : int , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : Tuple , _A : List[Any] ) -> str: """simple docstring""" snake_case_ : Any = self.num_labels snake_case_ : Any = SqueezeBertForSequenceClassification(_A ) model.to(_A ) model.eval() snake_case_ : str = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self : Dict , _A : Union[str, Any] , _A : List[Any] , _A : str , _A : List[Any] , _A : Optional[Any] , _A : Dict ) -> Dict: """simple docstring""" snake_case_ : Union[str, Any] = self.num_labels snake_case_ : List[Any] = SqueezeBertForTokenClassification(config=_A ) model.to(_A ) model.eval() snake_case_ : str = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase_ ( self : Union[str, Any] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : str , _A : str , _A : Union[str, Any] ) -> Tuple: """simple docstring""" snake_case_ : List[str] = self.num_choices snake_case_ : Tuple = SqueezeBertForMultipleChoice(config=_A ) model.to(_A ) model.eval() snake_case_ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ : List[str] = model( _A , attention_mask=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase_ ( self : int ) -> List[str]: """simple docstring""" snake_case_ : str = 
self.prepare_config_and_inputs() ((snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_) ,(snake_case_)) : Tuple = config_and_inputs snake_case_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ): __magic_name__: int = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) __magic_name__: List[str] = ( { "feature-extraction": SqueezeBertModel, "fill-mask": SqueezeBertForMaskedLM, "question-answering": SqueezeBertForQuestionAnswering, "text-classification": SqueezeBertForSequenceClassification, "token-classification": SqueezeBertForTokenClassification, "zero-shot": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) __magic_name__: List[Any] = False __magic_name__: List[str] = True __magic_name__: Optional[int] = False def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" snake_case_ : str = SqueezeBertModelTester(self ) snake_case_ : int = ConfigTester(self , config_class=_A , dim=37 ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self : Dict ) -> Any: """simple docstring""" snake_case_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*_A ) def UpperCAmelCase_ ( self : Any ) -> str: """simple docstring""" snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*_A ) def UpperCAmelCase_ ( self : str ) -> Any: """simple docstring""" snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A ) def UpperCAmelCase_ ( self : str ) -> int: """simple docstring""" snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*_A ) def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A ) @slow def UpperCAmelCase_ ( self : int ) -> Dict: """simple docstring""" for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : str = SqueezeBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_sentencepiece @require_tokenizers @require_torch class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @slow def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" snake_case_ : Dict = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' ) snake_case_ : Union[str, Any] = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] ) snake_case_ : Optional[Any] = model(_A )[0] snake_case_ : Dict = torch.Size((1, 3) ) self.assertEqual(output.shape , _A ) snake_case_ : int = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] ) self.assertTrue(torch.allclose(_A , _A , atol=1E-4 ) )
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 50_00_00

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))


@get_duration
def map(dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string'), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased', use_fast=True)

        def tokenize(examples):
            return tokenizer(examples['text'])

        _ = map(dataset)
        _ = map(dataset, batched=True)
        _ = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='numpy'):
            _ = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='pandas'):
            _ = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='torch', columns='numbers'):
            _ = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='tensorflow', columns='numbers'):
            _ = map(dataset, function=lambda x: None, batched=True)
        _ = map(dataset, function=tokenize, batched=True)
        _ = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH, 'wb') as f:
            f.write(json.dumps(times).encode('utf-8'))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
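# The benchmark above imports `get_duration` from a local `utils` module that
# is not shown in this file. A minimal sketch of such a timing decorator is
# given below as an assumption; the real helper may differ. This version
# returns the elapsed wall-clock time instead of the wrapped function's result.
import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # seconds elapsed

    return wrapper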
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
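# `fire.Fire(calculate_rouge_path)` above exposes the function as a CLI, so a
# plausible invocation (file names are illustrative, not from the source) is:
#
#   python rouge_cli.py predictions.txt references.txt --save_path=rouge.json
#
# `calculate_rouge` and `save_json` come from a local `utils` module that is
# not shown here; a minimal sketch of `save_json` consistent with the call
# above (signature assumed) could be:
import json


def save_json(content, path, indent=4, **json_dump_kwargs):
    # Write `content` as JSON to `path`; indent=None produces compact output.
    with open(path, 'w') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)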
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("""covid_data""", """cases deaths recovered""")


def covid_stats(url="https://www.worldometers.info/coronavirus/"):
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
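# A hedged usage sketch for the scraper above: the XPath yields the three
# counters in page order, and the namedtuple exposes them by name. Network
# access is required, and the page layout may have changed since this was
# written.
#
#   stats = covid_stats()
#   print(stats.cases, stats.deaths, stats.recovered)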
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _SCREAMING_SNAKE_CASE = { """vocab_file""": { """squeezebert/squeezebert-uncased""": ( """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt""" ), """squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""", """squeezebert/squeezebert-mnli-headless""": ( """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """squeezebert/squeezebert-uncased""": ( """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json""" ), """squeezebert/squeezebert-mnli""": ( """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json""" ), """squeezebert/squeezebert-mnli-headless""": ( """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json""" ), }, } _SCREAMING_SNAKE_CASE = { """squeezebert/squeezebert-uncased""": 5_12, """squeezebert/squeezebert-mnli""": 5_12, """squeezebert/squeezebert-mnli-headless""": 5_12, } _SCREAMING_SNAKE_CASE = { """squeezebert/squeezebert-uncased""": {"""do_lower_case""": True}, """squeezebert/squeezebert-mnli""": {"""do_lower_case""": True}, """squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True}, } class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: int = VOCAB_FILES_NAMES __magic_name__: Dict = PRETRAINED_VOCAB_FILES_MAP __magic_name__: int = PRETRAINED_INIT_CONFIGURATION __magic_name__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__: List[Any] = SqueezeBertTokenizer def __init__( self : List[Any] , _A : Any=None , _A : Dict=None , _A : Dict=True , _A : int="[UNK]" , _A : Union[str, Any]="[SEP]" , _A : int="[PAD]" , _A : List[str]="[CLS]" , _A : Tuple="[MASK]" , _A : Dict=True , _A : Tuple=None , **_A : Any , ) -> str: """simple docstring""" super().__init__( _A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , ) snake_case_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _A ) != do_lower_case or normalizer_state.get('strip_accents' , _A ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars ): snake_case_ : List[str] = getattr(_A , normalizer_state.pop('type' ) ) snake_case_ : Any = do_lower_case snake_case_ : Optional[int] = strip_accents snake_case_ : str = tokenize_chinese_chars snake_case_ : Union[str, Any] = normalizer_class(**_A ) snake_case_ : Optional[Any] = do_lower_case def UpperCAmelCase_ ( self : List[str] , _A : int , _A : Dict=None ) -> str: """simple docstring""" snake_case_ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self : List[str] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" snake_case_ : int = [self.sep_token_id] snake_case_ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + 
token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" snake_case_ : List[Any] = self._tokenizer.model.save(_A , name=_A ) return tuple(_A )
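# A hedged usage sketch of the fast tokenizer defined above (upstream this
# class is `SqueezeBertTokenizerFast`; the checkpoint name is taken from the
# pretrained maps above). Token type ids follow the pair logic above: 0 over
# `[CLS] A [SEP]`, 1 over `B [SEP]`.
from transformers import SqueezeBertTokenizerFast

tok = SqueezeBertTokenizerFast.from_pretrained('squeezebert/squeezebert-uncased')
enc = tok('first segment', 'second segment')
print(enc['token_type_ids'])  # 0s over the first segment span, 1s over the second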
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _SCREAMING_SNAKE_CASE = { """vocab_file""": { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""", }, """tokenizer_file""": { """unc-nlp/lxmert-base-uncased""": ( """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json""" ), }, } _SCREAMING_SNAKE_CASE = { """unc-nlp/lxmert-base-uncased""": 5_12, } _SCREAMING_SNAKE_CASE = { """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True}, } class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: List[Any] = VOCAB_FILES_NAMES __magic_name__: List[str] = PRETRAINED_VOCAB_FILES_MAP __magic_name__: List[str] = PRETRAINED_INIT_CONFIGURATION __magic_name__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__: Union[str, Any] = LxmertTokenizer def __init__( self : List[str] , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=True , _A : Dict="[UNK]" , _A : Optional[int]="[SEP]" , _A : Dict="[PAD]" , _A : Union[str, Any]="[CLS]" , _A : str="[MASK]" , _A : Tuple=True , _A : Dict=None , **_A : List[Any] , ) -> Optional[int]: """simple docstring""" super().__init__( _A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , ) snake_case_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _A ) != do_lower_case or normalizer_state.get('strip_accents' , _A ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars ): snake_case_ : Tuple = getattr(_A , normalizer_state.pop('type' ) ) snake_case_ : Union[str, Any] = do_lower_case snake_case_ : int = strip_accents snake_case_ : Optional[Any] = tokenize_chinese_chars snake_case_ : List[Any] = normalizer_class(**_A ) snake_case_ : Tuple = do_lower_case def UpperCAmelCase_ ( self : Dict , _A : Any , _A : List[Any]=None ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" snake_case_ : str = [self.sep_token_id] snake_case_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" snake_case_ : Union[str, Any] = self._tokenizer.model.save(_A , name=_A ) return tuple(_A )
import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version _SCREAMING_SNAKE_CASE = version.parse(importlib_metadata.version("""nltk""")) if NLTK_VERSION >= version.Version("""3.6.4"""): from nltk import word_tokenize _SCREAMING_SNAKE_CASE = """\ @inproceedings{banarjee2005, title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments}, author = {Banerjee, Satanjeev and Lavie, Alon}, booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, month = jun, year = {2005}, address = {Ann Arbor, Michigan}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W05-0909}, pages = {65--72}, } """ _SCREAMING_SNAKE_CASE = """\ METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. """ _SCREAMING_SNAKE_CASE = """ Computes METEOR score of translated segments against one or more references. Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. alpha: Parameter for controlling relative weights of precision and recall. default: 0.9 beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3 gamma: Relative weight assigned to fragmentation penalty. default: 0.5 Returns: 'meteor': meteor score. 
Examples: >>> meteor = datasets.load_metric('meteor') >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"] >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results[\"meteor\"], 4)) 0.6944 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE_ ( datasets.Metric ): def UpperCAmelCase_ ( self : Optional[int] ) -> int: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] , ) def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] ) -> Optional[int]: """simple docstring""" import nltk nltk.download('wordnet' ) if NLTK_VERSION >= version.Version('3.6.5' ): nltk.download('punkt' ) if NLTK_VERSION >= version.Version('3.6.6' ): nltk.download('omw-1.4' ) def UpperCAmelCase_ ( self : int , _A : Any , _A : Union[str, Any] , _A : List[str]=0.9 , _A : Tuple=3 , _A : Dict=0.5 ) -> Union[str, Any]: """simple docstring""" if NLTK_VERSION >= version.Version('3.6.5' ): snake_case_ : Union[str, Any] = [ meteor_score.single_meteor_score( word_tokenize(_A ) , word_tokenize(_A ) , alpha=_A , beta=_A , gamma=_A ) for ref, pred in zip(_A , _A ) ] else: snake_case_ : List[Any] = [ meteor_score.single_meteor_score(_A , _A , alpha=_A , beta=_A , gamma=_A ) for ref, pred in zip(_A , _A ) ] return {"meteor": np.mean(_A )}
def is_automorphic_number(number: int) -> bool:
    # A number is automorphic when its square ends in the number itself.
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
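# Worked examples for the automorphic check above: a number passes when it
# equals the trailing digits of its own square.
assert is_automorphic_number(5)  # 5**2 = 25 ends in 5
assert is_automorphic_number(76)  # 76**2 = 5776 ends in 76
assert not is_automorphic_number(7)  # 7**2 = 49 does not end in 7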
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float('inf')):
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float('inf')):
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("""Distance:""", closest_pair_of_points(points, len(points)))
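# A brute-force reference (not part of the original module) to sanity-check
# the divide-and-conquer implementation above on small inputs.
from itertools import combinations


def brute_force_closest(points):
    # O(n^2): compare every pair and keep the smallest Euclidean distance.
    return min(euclidean_distance_sqr(a, b) for a, b in combinations(points, 2)) ** 0.5


# For the sample list in __main__ above, both approaches report the pair
# (2, 3) and (3, 4) at distance sqrt(2) ~ 1.4142.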
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { """configuration_autoformer""": [ """AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AutoformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """AutoformerForPrediction""", """AutoformerModel""", """AutoformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
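# A hedged note on the lazy-import pattern above: `_LazyModule` only
# materializes a submodule when one of its attributes is first accessed, so
#
#   from transformers.models.autoformer import AutoformerConfig
#
# imports `configuration_autoformer` alone; the heavier `modeling_autoformer`
# module is left untouched until a model class is actually requested.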
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : List[str] = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png' snake_case_ : str = Image.open(requests.get(__a , stream=__a ).raw ).convert('RGB' ) return image def SCREAMING_SNAKE_CASE__ ( __a ): snake_case_ : Union[str, Any] = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): snake_case_ : Optional[Any] = dct.pop(__a ) snake_case_ : Optional[Any] = val def SCREAMING_SNAKE_CASE__ ( __a , __a ): for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases snake_case_ : List[Any] = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" ) snake_case_ : Any = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set 
bias in the state dict snake_case_ : Optional[Any] = torch.cat((q_bias, torch.zeros_like(__a , requires_grad=__a ), v_bias) ) snake_case_ : int = qkv_bias def SCREAMING_SNAKE_CASE__ ( __a , __a ): snake_case_ : Dict = 3_64 if 'coco' in model_name else 2_24 snake_case_ : Union[str, Any] = BlipaVisionConfig(image_size=__a ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: snake_case_ : Dict = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=__a ).to_dict() elif "opt-6.7b" in model_name: snake_case_ : str = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=__a ).to_dict() elif "t5-xl" in model_name: snake_case_ : Union[str, Any] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: snake_case_ : List[str] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() snake_case_ : Tuple = BlipaConfig(vision_config=__a , text_config=__a ) return config, image_size @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( __a , __a=None , __a=False ): snake_case_ : Optional[Any] = ( AutoTokenizer.from_pretrained('facebook/opt-2.7b' ) if 'opt' in model_name else AutoTokenizer.from_pretrained('google/flan-t5-xl' ) ) snake_case_ : Optional[int] = tokenizer('\n' , add_special_tokens=__a ).input_ids[0] snake_case_ ,snake_case_ : str = get_blipa_config(__a , eos_token_id=__a ) snake_case_ : Dict = BlipaForConditionalGeneration(__a ).eval() snake_case_ : Dict = { 'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'), 'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'), 'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'), 'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'), 'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'), 'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'), 'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'), } snake_case_ ,snake_case_ : List[str] = model_name_to_original[model_name] # load original model print('Loading original model...' ) snake_case_ : List[str] = 'cuda' if torch.cuda.is_available() else 'cpu' snake_case_ ,snake_case_ ,snake_case_ : Optional[int] = load_model_and_preprocess( name=__a , model_type=__a , is_eval=__a , device=__a ) original_model.eval() print('Done!' 
) # update state dict keys snake_case_ : str = original_model.state_dict() snake_case_ : List[str] = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): snake_case_ : List[str] = state_dict.pop(__a ) if key.startswith('Qformer.bert' ): snake_case_ : Any = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: snake_case_ : List[Any] = key.replace('self' , 'attention' ) if "opt_proj" in key: snake_case_ : int = key.replace('opt_proj' , 'language_projection' ) if "t5_proj" in key: snake_case_ : Dict = key.replace('t5_proj' , 'language_projection' ) if key.startswith('opt' ): snake_case_ : int = key.replace('opt' , 'language' ) if key.startswith('t5' ): snake_case_ : List[Any] = key.replace('t5' , 'language' ) snake_case_ : Optional[Any] = val # read in qv biases read_in_q_v_bias(__a , __a ) snake_case_ ,snake_case_ : List[str] = hf_model.load_state_dict(__a , strict=__a ) assert len(__a ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] snake_case_ : Dict = load_demo_image() snake_case_ : str = vis_processors['eval'](__a ).unsqueeze(0 ).to(__a ) snake_case_ : Optional[int] = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(__a ) # create processor snake_case_ : Dict = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=__a , image_std=__a ) snake_case_ : str = BlipaProcessor(image_processor=__a , tokenizer=__a ) snake_case_ : Tuple = processor(images=__a , return_tensors='pt' ).pixel_values.to(__a ) # make sure processor creates exact same pixel values assert torch.allclose(__a , __a ) original_model.to(__a ) hf_model.to(__a ) with torch.no_grad(): if "opt" in model_name: snake_case_ : Optional[Any] = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits snake_case_ : Dict = hf_model(__a , __a ).logits else: snake_case_ : Optional[Any] = original_model( {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits snake_case_ : Union[str, Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) snake_case_ : List[str] = hf_model(__a , __a , labels=__a ).logits assert original_logits.shape == logits.shape print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": snake_case_ : Optional[int] = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=__a ) assert torch.allclose(logits[0, :3, :3] , __a , atol=1E-4 ) elif model_name == "blip2-flan-t5-xl-coco": snake_case_ : Union[str, Any] = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=__a ) else: # cast to same type snake_case_ : List[str] = logits.dtype assert torch.allclose(original_logits.to(__a ) , __a , atol=1E-2 ) print('Looks ok!' ) print('Generating a caption...' 
) snake_case_ : Tuple = '' snake_case_ : List[Any] = tokenizer(__a , return_tensors='pt' ).input_ids.to(__a ) snake_case_ : Any = original_model.generate({'image': original_pixel_values} ) snake_case_ : Any = hf_model.generate( __a , __a , do_sample=__a , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('Original generation:' , __a ) snake_case_ : Tuple = input_ids.shape[1] snake_case_ : Dict = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__a ) snake_case_ : int = [text.strip() for text in output_text] print('HF generation:' , __a ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__a ) hf_model.save_pretrained(__a ) if push_to_hub: processor.push_to_hub(f"""nielsr/{model_name}""" ) hf_model.push_to_hub(f"""nielsr/{model_name}""" ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() _SCREAMING_SNAKE_CASE = [ """blip2-opt-2.7b""", """blip2-opt-6.7b""", """blip2-opt-2.7b-coco""", """blip2-opt-6.7b-coco""", """blip2-flan-t5-xl""", """blip2-flan-t5-xl-coco""", """blip2-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""blip2-opt-2.7b""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
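# A hedged usage sketch for the conversion script above (the script file name
# is an assumption; the flags are the ones registered on the parser):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-hf \
#       --push_to_hub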
from typing import Dict from .base import GenericTensor, Pipeline class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def UpperCAmelCase_ ( self : str , _A : Optional[Any]=None , _A : List[str]=None , _A : Optional[Any]=None , **_A : List[str] ) -> Any: """simple docstring""" if tokenize_kwargs is None: snake_case_ : Optional[Any] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) snake_case_ : int = truncation snake_case_ : Optional[int] = tokenize_kwargs snake_case_ : Dict = {} if return_tensors is not None: snake_case_ : Union[str, Any] = return_tensors return preprocess_params, {}, postprocess_params def UpperCAmelCase_ ( self : Optional[int] , _A : int , **_A : Any ) -> Dict[str, GenericTensor]: """simple docstring""" snake_case_ : Dict = self.framework snake_case_ : Any = self.tokenizer(_A , return_tensors=_A , **_A ) return model_inputs def UpperCAmelCase_ ( self : Optional[Any] , _A : List[str] ) -> int: """simple docstring""" snake_case_ : Tuple = self.model(**_A ) return model_outputs def UpperCAmelCase_ ( self : Union[str, Any] , _A : str , _A : str=False ) -> Any: """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[str] , *_A : Union[str, Any] , **_A : Tuple ) -> List[str]: """simple docstring""" return super().__call__(*_A , **_A )
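# A minimal, hedged usage sketch of the feature-extraction pipeline defined
# above (the model name is illustrative; any encoder checkpoint works):
from transformers import pipeline

extractor = pipeline('feature-extraction', model='distilbert-base-uncased')
features = extractor('Hello world')
# `features` is a nested list of shape [1, n_tokens, hidden_size] per input,
# i.e. the model's last hidden state converted via `.tolist()` as above.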
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @slow def UpperCAmelCase_ ( self : Optional[Any] ) -> int: """simple docstring""" snake_case_ : List[str] = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' ) snake_case_ : List[str] = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" snake_case_ : int = model(_A )['last_hidden_state'] snake_case_ : int = tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape , _A ) # compare the actual values for a slice. snake_case_ : Tuple = tf.convert_to_tensor( [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(F'''{solution() = }''')
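# A worked check for is_substring_divisible above, using the pandigital number
# from the Project Euler 43 statement, 1406357289:
#   d2d3d4 = 406 % 2 == 0,  d3d4d5 = 063 % 3 == 0,  d4d5d6 = 635 % 5 == 0,
#   d5d6d7 = 357 % 7 == 0,  d6d7d8 = 572 % 11 == 0, d7d8d9 = 728 % 13 == 0,
#   d8d9d10 = 289 % 17 == 0
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))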
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple('result', 'name value')
    if (voltage, current, power).count(0) != 1:
        raise ValueError('Only one argument must be 0')
    elif power < 0:
        raise ValueError(
            'Power cannot be negative in any electrical/electronics system'
        )
    elif voltage == 0:
        return result('voltage', power / current)
    elif current == 0:
        return result('current', power / voltage)
    elif power == 0:
        return result('power', float(round(abs(voltage * current), 2)))
    else:
        raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
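# Hedged usage examples for electric_power above (values follow P = V * I; the
# function solves for whichever quantity is passed in as 0):
#
#   electric_power(voltage=0, current=2, power=5)
#   -> result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=2, power=0)
#   -> result(name='power', value=4.0)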
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): _SCREAMING_SNAKE_CASE = """pt""" elif is_tf_available(): _SCREAMING_SNAKE_CASE = """tf""" else: _SCREAMING_SNAKE_CASE = """jax""" class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ): __magic_name__: Any = ByTaTokenizer __magic_name__: List[Any] = False def UpperCAmelCase_ ( self : str ) -> Tuple: """simple docstring""" super().setUp() snake_case_ : str = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict: """simple docstring""" return ByTaTokenizer.from_pretrained('google/byt5-small' ) def UpperCAmelCase_ ( self : Union[str, Any] , **_A : Union[str, Any] ) -> ByTaTokenizer: """simple docstring""" return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def UpperCAmelCase_ ( self : Optional[Any] , _A : int , _A : List[str]=False , _A : int=20 , _A : List[Any]=5 ) -> Tuple[str, list]: """simple docstring""" snake_case_ : Optional[int] = [] for i in range(len(_A ) ): try: snake_case_ : List[str] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) snake_case_ : Union[str, Any] = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) ) snake_case_ : str = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: snake_case_ : int = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: snake_case_ : List[Any] = toks + toks # toks_str = [t[1] for t in toks] snake_case_ : Union[str, Any] = [t[0] for t in toks] # Ensure consistency snake_case_ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: snake_case_ : Union[str, Any] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: snake_case_ : Optional[Any] = ' ' + output_txt snake_case_ : List[str] = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict: """simple docstring""" snake_case_ : Dict = self.ta_base_tokenizer snake_case_ : Tuple = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] ) snake_case_ : Any = tokenizer(['hi', 'I went to the gym', ''] ) self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] ) def UpperCAmelCase_ ( self : List[str] ) -> Any: """simple docstring""" snake_case_ : Any = self.ta_base_tokenizer snake_case_ : int = 'Unicode €.' 
snake_case_ : Union[str, Any] = tokenizer(_A ) snake_case_ : Union[str, Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding snake_case_ : Optional[Any] = tokenizer.decode(_A ) self.assertEqual(_A , 'Unicode €.</s>' ) snake_case_ : Optional[int] = tokenizer('e è é ê ë' ) snake_case_ : Any = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded['input_ids'] , _A ) # decoding snake_case_ : Tuple = tokenizer.decode(_A ) self.assertEqual(_A , 'e è é ê ë</s>' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' ) def UpperCAmelCase_ ( self : str ) -> List[str]: """simple docstring""" snake_case_ : str = self.ta_base_tokenizer snake_case_ : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off snake_case_ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on snake_case_ : List[Any] = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": snake_case_ : Optional[int] = list(batch.input_ids.numpy()[0] ) else: snake_case_ : List[str] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: """simple docstring""" snake_case_ : int = self.ta_base_tokenizer snake_case_ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] snake_case_ : Any = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _A ) self.assertIn('attention_mask' , _A ) self.assertNotIn('decoder_input_ids' , _A ) self.assertNotIn('decoder_attention_mask' , _A ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Any: """simple docstring""" snake_case_ : Optional[int] = self.ta_base_tokenizer snake_case_ : Tuple = [ 'Summary of the text.', 'Another summary.', ] snake_case_ : Dict = tokenizer( text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def UpperCAmelCase_ ( self : List[str] ) -> Dict: """simple docstring""" snake_case_ : Tuple = self.ta_base_tokenizer snake_case_ : Union[str, Any] = ['A long paragraph for summarization. </s>'] snake_case_ : Tuple = ['Summary of the text. 
</s>'] # fmt: off snake_case_ : Union[str, Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] snake_case_ : Optional[int] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on snake_case_ : str = tokenizer(_A , text_target=_A ) self.assertEqual(_A , batch['input_ids'][0] ) self.assertEqual(_A , batch['labels'][0] ) def UpperCAmelCase_ ( self : Dict ) -> Optional[int]: """simple docstring""" snake_case_ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test snake_case_ : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc snake_case_ : Union[str, Any] = tempfile.mkdtemp() snake_case_ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' snake_case_ : Optional[int] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) snake_case_ : List[str] = tokenizer.__class__.from_pretrained(_A ) snake_case_ : Union[str, Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) snake_case_ : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc snake_case_ : Union[str, Any] = tempfile.mkdtemp() snake_case_ : Optional[int] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) snake_case_ : Optional[Any] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) snake_case_ : List[Any] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) snake_case_ : Union[str, Any] = tokenizer.__class__.from_pretrained(_A ) snake_case_ : Optional[int] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) snake_case_ : Tuple = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" snake_case_ : str = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: snake_case_ : List[str] = json.load(_A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: snake_case_ : Optional[Any] = json.load(_A ) snake_case_ : Union[str, Any] = [F"""<extra_id_{i}>""" for i in range(125 )] snake_case_ : Union[str, Any] = added_tokens_extra_ids + [ 'an_additional_special_token' ] snake_case_ : Tuple 
= added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files snake_case_ : Optional[Any] = tokenizer_class.from_pretrained( _A , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained snake_case_ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )] snake_case_ : int = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def UpperCAmelCase_ ( self : Dict ) -> Any: """simple docstring""" snake_case_ : List[str] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) snake_case_ : List[Any] = tokenizer_class.from_pretrained(_A ) self.assertTrue(tokenizer.decode([255] ) == '' ) def UpperCAmelCase_ ( self : int ) -> List[Any]: """simple docstring""" pass def UpperCAmelCase_ ( self : str ) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase_ ( self : Tuple ) -> str: """simple docstring""" pass def UpperCAmelCase_ ( self : str ) -> int: """simple docstring""" pass def UpperCAmelCase_ ( self : str ) -> Optional[Any]: """simple docstring""" snake_case_ : int = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ : Union[str, Any] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>'] snake_case_ : Optional[Any] = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A ) def UpperCAmelCase_ ( self : Any ) -> Dict: """simple docstring""" snake_case_ : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case_ : Optional[int] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] snake_case_ : Union[str, Any] = 0 snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens( _A , skip_special_tokens=_A ) for attr in attributes_list: setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , attr + '_id' , _A ) self.assertEqual(getattr(_A , _A ) , _A ) 
self.assertEqual(getattr(_A , attr + '_id' ) , _A ) setattr(_A , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] ) setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] ) self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
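# A hedged sketch of the byte-level mapping the ByT5 tests above rely on: each
# UTF-8 byte maps to id `byte + 3` (ids 0-2 are reserved for pad/eos/unk),
# which is why 'U' (byte 85) shows up as 88 in the expected ids earlier.
def byt5_like_ids(text: str) -> list:
    return [b + 3 for b in text.encode('utf-8')] + [1]  # trailing 1 == </s>


assert byt5_like_ids('Unicode €.') == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]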
import re import string import numpy as np import datasets _SCREAMING_SNAKE_CASE = """ Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. """ _SCREAMING_SNAKE_CASE = """ Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all numbers before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 25.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 50.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 75.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results[\"exact_match\"], 1)) 100.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"] >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 33.3 """ _SCREAMING_SNAKE_CASE = """ """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE_ ( datasets.Metric ): def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' ,
id='sequence' ), } ) , reference_urls=[] , ) def UpperCAmelCase_ ( self : int , _A : Tuple , _A : Tuple , _A : str=None , _A : Dict=False , _A : Tuple=False , _A : str=False , ) -> Tuple: """simple docstring""" if regexes_to_ignore is not None: for s in regexes_to_ignore: snake_case_ : List[Any] = np.array([re.sub(_A , '' , _A ) for x in predictions] ) snake_case_ : Optional[Any] = np.array([re.sub(_A , '' , _A ) for x in references] ) else: snake_case_ : Dict = np.asarray(_A ) snake_case_ : Tuple = np.asarray(_A ) if ignore_case: snake_case_ : List[str] = np.char.lower(_A ) snake_case_ : Any = np.char.lower(_A ) if ignore_punctuation: snake_case_ : int = string.punctuation.maketrans('' , '' , string.punctuation ) snake_case_ : Tuple = np.char.translate(_A , table=_A ) snake_case_ : str = np.char.translate(_A , table=_A ) if ignore_numbers: snake_case_ : Optional[int] = string.digits.maketrans('' , '' , string.digits ) snake_case_ : str = np.char.translate(_A , table=_A ) snake_case_ : Union[str, Any] = np.char.translate(_A , table=_A ) snake_case_ : int = predictions == references return {"exact_match": np.mean(_A ) * 100}
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def SCREAMING_SNAKE_CASE__ ( __a ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class SCREAMING_SNAKE_CASE_ ( nn.Module ): def __init__( self : str , _A : nn.Module , _A : int ) -> List[Any]: """simple docstring""" super().__init__() snake_case_ : List[Any] = module snake_case_ : List[str] = nn.Sequential( nn.Linear(module.in_features , _A , bias=_A ) , nn.Linear(_A , module.out_features , bias=_A ) , ) snake_case_ : List[str] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=_A ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def UpperCAmelCase_ ( self : int , _A : Tuple , *_A : int , **_A : Tuple ) -> Optional[int]: """simple docstring""" return self.module(_A , *_A , **_A ) + self.adapter(_A ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module __magic_name__: List[Any] = "bigscience/bloom-1b7" # Constant values __magic_name__: Dict = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4 __magic_name__: List[str] = "Hello my name is" __magic_name__: str = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I" ) EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" ) EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" ) __magic_name__: int = 10 def UpperCAmelCase_ ( self : List[Any] ) -> int: """simple docstring""" snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained(self.model_name ) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: """simple docstring""" super().setUp() # Models and tokenizer snake_case_ : str = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) snake_case_ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_A , device_map='auto' ) def UpperCAmelCase_ ( self : int ) -> Any: """simple docstring""" del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]: """simple docstring""" snake_case_ : Optional[Any] = self.model_abit.config self.assertTrue(hasattr(_A , 'quantization_config' ) ) snake_case_ : Optional[Any] = config.to_dict() snake_case_ : Optional[Any] = config.to_diff_dict() snake_case_ : Dict = config.to_json_string() def UpperCAmelCase_ ( self : List[str] ) -> List[Any]: """simple docstring""" from bitsandbytes.nn import Paramsabit snake_case_ : List[str] = self.model_fpaa.get_memory_footprint() snake_case_ : Dict = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) snake_case_ : Optional[int] = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_A , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def UpperCAmelCase_ ( self : List[Any] ) -> Dict: """simple docstring""" snake_case_ : str = self.tokenizer(self.input_text , return_tensors='pt' ) snake_case_ : int = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_A ) , self.EXPECTED_OUTPUTS ) def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: """simple docstring""" snake_case_ : Optional[Any] = BitsAndBytesConfig() snake_case_ : Optional[Any] = True snake_case_ : str = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_A , device_map='auto' ) snake_case_ : Any = self.tokenizer(self.input_text , return_tensors='pt' ) snake_case_ : Any = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_A ) , self.EXPECTED_OUTPUTS ) def UpperCAmelCase_ ( self : Dict ) -> str: """simple docstring""" with self.assertRaises(_A ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_A ) def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = BitsAndBytesConfig() with self.assertRaises(_A ): snake_case_ : List[str] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_A , load_in_abit=_A , 
device_map='auto' , bnb_abit_quant_type='nf4' , ) def UpperCAmelCase_ ( self : int ) -> Tuple: """simple docstring""" with self.assertRaises(_A ): # Tries with `str` self.model_abit.to('cpu' ) with self.assertRaises(_A ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_A ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with self.assertRaises(_A ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_A ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything snake_case_ : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ) snake_case_ : Union[str, Any] = self.model_fpaa.to(torch.floataa ) snake_case_ : Optional[int] = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error snake_case_ : Union[str, Any] = self.model_fpaa.to('cpu' ) # Check this does not throw an error snake_case_ : Any = self.model_fpaa.half() # Check this does not throw an error snake_case_ : Tuple = self.model_fpaa.float() def UpperCAmelCase_ ( self : Dict ) -> Optional[int]: """simple docstring""" snake_case_ : str = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_A , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @classmethod def UpperCAmelCase_ ( cls : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Any = 't5-small' snake_case_ : int = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense snake_case_ : Any = AutoTokenizer.from_pretrained(cls.model_name ) snake_case_ : Any = 'Translate in German: Hello, my dog is cute' def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : str ) -> List[str]: """simple docstring""" from transformers import TaForConditionalGeneration snake_case_ : Union[str, Any] = TaForConditionalGeneration._keep_in_fpaa_modules snake_case_ : str = None # test with `t5-small` snake_case_ : str = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_A , device_map='auto' ) snake_case_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) snake_case_ : List[str] = model.generate(**_A ) # test with `flan-t5-small` snake_case_ : Optional[int] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_A , device_map='auto' ) snake_case_ : str = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) snake_case_ : List[Any] = model.generate(**_A ) snake_case_ : Tuple = modules def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` snake_case_ : Optional[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_A , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) snake_case_ : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) snake_case_ : Optional[int] = model.generate(**_A ) # test with `flan-t5-small` snake_case_ : int = TaForConditionalGeneration.from_pretrained( 
self.dense_act_model_name , load_in_abit=_A , device_map='auto' ) snake_case_ : Any = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) snake_case_ : str = model.generate(**_A ) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def UpperCAmelCase_ ( self : List[Any] ) -> int: """simple docstring""" super().setUp() # model_name snake_case_ : Union[str, Any] = 'bigscience/bloom-560m' snake_case_ : str = 't5-small' # Different types of model snake_case_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=_A , device_map='auto' ) # Sequence classification model snake_case_ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=_A , device_map='auto' ) # CausalLM model snake_case_ : Tuple = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_A , device_map='auto' ) # Seq2seq model snake_case_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=_A , device_map='auto' ) def UpperCAmelCase_ ( self : int ) -> Optional[int]: """simple docstring""" del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def UpperCAmelCase_ ( self : Dict ) -> Dict: """simple docstring""" super().setUp() def UpperCAmelCase_ ( self : List[str] ) -> int: """simple docstring""" del self.pipe gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]: """simple docstring""" snake_case_ : str = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass snake_case_ : Tuple = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" super().setUp() def UpperCAmelCase_ ( self : Dict ) -> str: """simple docstring""" snake_case_ : int = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=_A , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model snake_case_ : int = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch snake_case_ : List[str] = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_A ) , self.EXPECTED_OUTPUTS ) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def UpperCAmelCase_ ( self : Tuple ) -> int: """simple docstring""" snake_case_ : int = 'facebook/opt-350m' super().setUp() def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return 
# Step 1: freeze all parameters snake_case_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_A ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): snake_case_ : Any = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability snake_case_ : List[str] = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_A ) ): snake_case_ : Dict = LoRALayer(module.q_proj , rank=16 ) snake_case_ : Dict = LoRALayer(module.k_proj , rank=16 ) snake_case_ : Union[str, Any] = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch snake_case_ : Optional[Any] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): snake_case_ : Optional[Any] = model.forward(**_A ) out.logits.norm().backward() for module in model.modules(): if isinstance(_A , _A ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_A , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Tuple = "gpt2-xl" __magic_name__: Union[str, Any] = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
from __future__ import annotations

import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module

import numpy as np

from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SCREAMING_SNAKE_CASE_ :
    def __init__( self : List[Any] , _A : Optional[Any] , _A : Dict=13 , _A : Union[str, Any]=30 , _A : Tuple=2 , _A : Union[str, Any]=3 , _A : Optional[int]=True , _A : Optional[Any]=True , _A : str=32 , _A : int=2 , _A : List[str]=4 , _A : List[str]=37 , _A : Tuple="gelu" , _A : Dict=0.1 , _A : Optional[Any]=0.1 , _A : Optional[int]=10 , _A : Optional[int]=0.0_2 , _A : Optional[Any]=3 , _A : str=0.6 , _A : Union[str, Any]=None , ) -> Any:
        """simple docstring"""
        snake_case_ : Optional[int] = parent
        snake_case_ : Tuple = batch_size
        snake_case_ : List[Any] = image_size
        snake_case_ : List[str] = patch_size
        snake_case_ : List[str] = num_channels
        snake_case_ : Optional[Any] = is_training
        snake_case_ : Any = use_labels
        snake_case_ : Tuple = hidden_size
        snake_case_ : Union[str, Any] = num_hidden_layers
        snake_case_ : List[Any] = num_attention_heads
        snake_case_ : Optional[Any] = intermediate_size
        snake_case_ : List[Any] = hidden_act
        snake_case_ : Union[str, Any] = hidden_dropout_prob
        snake_case_ : Any = attention_probs_dropout_prob
        snake_case_ : Tuple = type_sequence_label_size
        snake_case_ : List[str] = initializer_range
        snake_case_ : Optional[Any] = mask_ratio
        snake_case_ : Any = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        snake_case_ : Optional[int] = (image_size // patch_size) ** 2
        snake_case_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )

    def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
        """simple docstring"""
        snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case_ : Union[str, Any] = None
        if self.use_labels:
            snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case_ : Union[str, Any] = self.get_config()
        return config, pixel_values, labels

    def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
        """simple docstring"""
        return ViTMAEConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            decoder_hidden_size=self.hidden_size ,
            decoder_num_hidden_layers=self.num_hidden_layers ,
            decoder_num_attention_heads=self.num_attention_heads ,
            decoder_intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            is_decoder=_A ,
            initializer_range=self.initializer_range ,
            mask_ratio=self.mask_ratio ,
        )

    def UpperCAmelCase_ ( self : List[Any] , _A : int , _A : Dict , _A : str ) -> Dict:
        """simple docstring"""
        snake_case_ : Union[str, Any] = TFViTMAEModel(config=_A )
        snake_case_ : str = model(_A , training=_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase_ ( self : Dict , _A : Dict , _A : Any , _A : List[Any] ) -> int:
        """simple docstring"""
        snake_case_ : Any = TFViTMAEForPreTraining(_A )
        snake_case_ : Optional[Any] = model(_A , training=_A )
        # expected sequence length = num_patches
        snake_case_ : List[str] = (self.image_size // self.patch_size) ** 2
        snake_case_ : Optional[Any] = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        snake_case_ : str = 1
        snake_case_ : Dict = TFViTMAEForPreTraining(_A )
        snake_case_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case_ : List[str] = model(_A , training=_A )
        snake_case_ : Optional[Any] = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        snake_case_ : List[Any] = self.prepare_config_and_inputs()
        ((snake_case_) ,(snake_case_) ,(snake_case_)) : Any = config_and_inputs
        snake_case_ : Optional[Any] = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_tf
class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ):
    __magic_name__: List[str] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    __magic_name__: str = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    __magic_name__: Dict = False
    __magic_name__: Dict = False
    __magic_name__: List[Any] = False
    __magic_name__: Dict = False

    def UpperCAmelCase_ ( self : Any ) -> List[Any]:
        """simple docstring"""
        snake_case_ : List[Any] = TFViTMAEModelTester(self )
        snake_case_ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )

    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViTMAE does not use inputs_embeds' )
    def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        pass

    def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        snake_case_ ,snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : List[Any] = model_class(_A )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            snake_case_ : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )

    def UpperCAmelCase_ ( self : List[str] ) -> Dict:
        """simple docstring"""
        snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : List[str] = model_class(_A )
            snake_case_ : Any = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : Dict = [*signature.parameters.keys()]
            snake_case_ : Dict = ['pixel_values']
            self.assertListEqual(arg_names[:1] , _A )

    def UpperCAmelCase_ ( self : Dict ) -> List[str]:
        """simple docstring"""
        snake_case_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
        """simple docstring"""
        snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*_A )

    def UpperCAmelCase_ ( self : Tuple ) -> Dict:
        """simple docstring"""
        np.random.seed(2 )
        snake_case_ ,snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
        snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            snake_case_ : Optional[Any] = model_class(_A )
            snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A )
            snake_case_ : List[str] = model(_A , noise=_A )
            snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) )
            snake_case_ : str = model(**_A , noise=_A )
            snake_case_ : Union[str, Any] = outputs_dict[0].numpy()
            snake_case_ : Optional[Any] = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )

    def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        np.random.seed(2 )
        snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 )
        snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )

        def prepare_numpy_arrays(_A : int ):
            snake_case_ : Any = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(_A ):
                    snake_case_ : str = v.numpy()
                else:
                    snake_case_ : Optional[Any] = np.array(_A )
            return inputs_np_dict

        for model_class in self.all_model_classes:
            snake_case_ : int = model_class(_A )
            snake_case_ : List[Any] = self._prepare_for_class(_A , _A )
            snake_case_ : Any = prepare_numpy_arrays(_A )
            snake_case_ : List[Any] = model(_A , noise=_A )
            snake_case_ : List[Any] = model(**_A , noise=_A )
            self.assert_outputs_same(_A , _A )

    def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]:
        """simple docstring"""
        np.random.seed(2 )
        snake_case_ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        snake_case_ : Optional[int] = tf.constant(_A )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        snake_case_ : Optional[Any] = tf_noise
        super().check_pt_tf_models(_A , _A , _A )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
        """simple docstring"""
        np.random.seed(2 )
        snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : int = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(_A )
            if module_member_name.endswith('MainLayer' )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
            for module_member in (getattr(_A , _A ),)
            if isinstance(_A , _A )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(_A , '_keras_serializable' , _A )
        }
        snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
        snake_case_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        snake_case_ : Optional[int] = tf.convert_to_tensor(_A )
        inputs_dict.update({'noise': noise} )
        for main_layer_class in tf_main_layer_classes:
            snake_case_ : Optional[Any] = main_layer_class(_A )
            snake_case_ : List[str] = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            snake_case_ : Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) )
            snake_case_ : int = model(_A )
            with tempfile.TemporaryDirectory() as tmpdirname:
                snake_case_ : List[Any] = os.path.join(_A , 'keras_model.h5' )
                model.save(_A )
                snake_case_ : str = tf.keras.models.load_model(
                    _A , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(_A , tf.keras.Model )
                snake_case_ : List[str] = model(_A )
                self.assert_outputs_same(_A , _A )

    @slow
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        np.random.seed(2 )
        snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : int = int((config.image_size // config.patch_size) ** 2 )
        snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            snake_case_ : Optional[Any] = model_class(_A )
            snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A )
            snake_case_ : int = model(_A , noise=_A )
            if model_class.__name__ == "TFViTMAEModel":
                snake_case_ : Any = outputs.last_hidden_state.numpy()
                snake_case_ : Optional[int] = 0
            else:
                snake_case_ : str = outputs.logits.numpy()
                snake_case_ : Optional[Any] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_A , saved_model=_A )
                snake_case_ : Any = model_class.from_pretrained(_A )
                snake_case_ : Any = model(_A , noise=_A )
                if model_class.__name__ == "TFViTMAEModel":
                    snake_case_ : Dict = after_outputs['last_hidden_state'].numpy()
                    snake_case_ : Dict = 0
                else:
                    snake_case_ : Any = after_outputs['logits'].numpy()
                    snake_case_ : Optional[Any] = 0
                snake_case_ : Any = np.amax(np.abs(out_a - out_a ) )
                self.assertLessEqual(_A , 1E-5 )

    def UpperCAmelCase_ ( self : Any ) -> str:
        """simple docstring"""
        np.random.seed(2 )
        snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
        snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            snake_case_ : str = model_class(_A )
            snake_case_ : int = self._prepare_for_class(_A , _A )
            snake_case_ : str = model(_A , noise=_A )
            snake_case_ : Dict = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(_A )
            snake_case_ : Any = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            snake_case_ : str = model_class.from_config(model.config )
            snake_case_ : Union[str, Any] = new_model(_A )  # Build model
            new_model.set_weights(model.get_weights() )
            snake_case_ : List[str] = new_model(_A , noise=_A )
            self.assert_outputs_same(_A , _A )

    @unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        pass

    @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
    def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
        """simple docstring"""
        pass

    @slow
    def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
        """simple docstring"""
        snake_case_ : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(_A )


def SCREAMING_SNAKE_CASE__ ( ):
    snake_case_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    @cached_property
    def UpperCAmelCase_ ( self : str ) -> Dict:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None

    @slow
    def UpperCAmelCase_ ( self : str ) -> Dict:
        """simple docstring"""
        np.random.seed(2 )
        snake_case_ : List[str] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
        snake_case_ : List[Any] = self.default_image_processor
        snake_case_ : Dict = prepare_img()
        snake_case_ : Optional[Any] = image_processor(images=_A , return_tensors='tf' )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        snake_case_ : int = ViTMAEConfig()
        snake_case_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        snake_case_ : List[Any] = np.random.uniform(size=(1, num_patches) )
        # forward pass
        snake_case_ : Optional[Any] = model(**_A , noise=_A )
        # verify the logits
        snake_case_ : Optional[int] = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , _A )
        snake_case_ : Any = tf.convert_to_tensor(
            [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , _A , atol=1E-4 )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_SCREAMING_SNAKE_CASE = {
    """configuration_groupvit""": [
        """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """GroupViTConfig""",
        """GroupViTOnnxConfig""",
        """GroupViTTextConfig""",
        """GroupViTVisionConfig""",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = [
        """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GroupViTModel""",
        """GroupViTPreTrainedModel""",
        """GroupViTTextModel""",
        """GroupViTVisionModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = [
        """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFGroupViTModel""",
        """TFGroupViTPreTrainedModel""",
        """TFGroupViTTextModel""",
        """TFGroupViTVisionModel""",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations


def SCREAMING_SNAKE_CASE__ ( __a , __a ):
    snake_case_ : list[list[int]] = []
    snake_case_ : list[int] = []
    snake_case_ : List[Any] = 0
    snake_case_ : Union[str, Any] = sum(__a )
    create_state_space_tree(__a , __a , __a , __a , __a , __a )
    return result


def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a , ):
    if sum(__a ) > max_sum or (remaining_nums_sum + sum(__a )) < max_sum:
        return
    if sum(__a ) == max_sum:
        result.append(__a )
        return
    for index in range(__a , len(__a ) ):
        create_state_space_tree(
            __a , __a , index + 1 , [*path, nums[index]] , __a , remaining_nums_sum - nums[index] , )


_SCREAMING_SNAKE_CASE = [3, 34, 4, 12, 5, 2]
_SCREAMING_SNAKE_CASE = 9
_SCREAMING_SNAKE_CASE = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
from dataclasses import dataclass
from typing import Optional, Tuple

import torch
from torch import nn

from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput


@dataclass
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
    __magic_name__: Optional[torch.FloatTensor] = None
    __magic_name__: torch.FloatTensor = None
    __magic_name__: Optional[Tuple[torch.FloatTensor]] = None
    __magic_name__: Optional[Tuple[torch.FloatTensor]] = None


class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
    def __init__( self : Tuple , _A : Optional[int]=1 , _A : Dict=0 , _A : Optional[int]=2 , _A : Any=512 , _A : Dict="cls" , _A : Optional[int]=False , _A : Dict=True , **_A : Optional[Any] , ) -> int:
        """simple docstring"""
        super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A )
        snake_case_ : List[Any] = project_dim
        snake_case_ : Optional[Any] = pooler_fn
        snake_case_ : Dict = learn_encoder
        snake_case_ : Dict = use_attention_mask


class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
    __magic_name__: Union[str, Any] = [r"pooler", r"logit_scale"]
    __magic_name__: int = [r"position_ids", r"predictions.decoder.bias"]
    __magic_name__: Optional[Any] = "roberta"
    __magic_name__: Dict = RobertaSeriesConfig

    def __init__( self : List[str] , _A : Optional[Any] ) -> Any:
        """simple docstring"""
        super().__init__(_A )
        snake_case_ : Optional[Any] = XLMRobertaModel(_A )
        snake_case_ : int = nn.Linear(config.hidden_size , config.project_dim )
        snake_case_ : List[str] = getattr(_A , 'has_pre_transformation' , _A )
        if self.has_pre_transformation:
            snake_case_ : str = nn.Linear(config.hidden_size , config.project_dim )
            snake_case_ : Union[str, Any] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()

    def UpperCAmelCase_ ( self : List[Any] , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[bool] = None , ) -> int:
        """simple docstring"""
        snake_case_ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
        snake_case_ : int = self.base_model(
            input_ids=_A ,
            attention_mask=_A ,
            token_type_ids=_A ,
            position_ids=_A ,
            head_mask=_A ,
            inputs_embeds=_A ,
            encoder_hidden_states=_A ,
            encoder_attention_mask=_A ,
            output_attentions=_A ,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states ,
            return_dict=_A ,
        )
        if self.has_pre_transformation:
            snake_case_ : str = outputs['hidden_states'][-2]
            snake_case_ : Tuple = self.pre_LN(_A )
            snake_case_ : List[str] = self.transformation_pre(_A )
            return TransformationModelOutput(
                projection_state=_A ,
                last_hidden_state=outputs.last_hidden_state ,
                hidden_states=outputs.hidden_states ,
                attentions=outputs.attentions ,
            )
        else:
            snake_case_ : int = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=_A ,
                last_hidden_state=outputs.last_hidden_state ,
                hidden_states=outputs.hidden_states ,
                attentions=outputs.attentions ,
            )
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( __a ):
    monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )


@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( __a ):
    class SCREAMING_SNAKE_CASE_:
        def __init__( self : Union[str, Any] , _A : Tuple ) -> Tuple:
            """simple docstring"""
            snake_case_ : Tuple = metric_id

    class SCREAMING_SNAKE_CASE_:
        __magic_name__: Optional[int] = [MetricMock(snake_case_ ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def UpperCAmelCase_ ( self : Any ) -> int:
            """simple docstring"""
            return self._metrics

    monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )


@pytest.mark.parametrize(
    'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a ):
    if "tmp_path" in args:
        snake_case_ : Optional[Any] = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
    with pytest.warns(__a , match='https://huggingface.co/docs/evaluate' ):
        func(*__a )
from math import pi


def SCREAMING_SNAKE_CASE__ ( __a , __a ):
    return 2 * pi * radius * (angle / 3_60)


if __name__ == "__main__":
    print(arc_length(90, 10))
from __future__ import annotations

from collections import namedtuple


def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ):
    snake_case_ : Any = namedtuple('result' , 'name value' )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError('Only one argument must be 0' )
    elif power < 0:
        raise ValueError(
            'Power cannot be negative in any electrical/electronics system' )
    elif voltage == 0:
        return result('voltage' , power / current )
    elif current == 0:
        return result('current' , power / voltage )
    elif power == 0:
        return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError('Exactly one argument must be 0' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
    __magic_name__: Optional[Any] = ["pixel_values"]

    def __init__( self : str , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PIL.Image.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : str , ) -> None:
        """simple docstring"""
        super().__init__(**_A )
        snake_case_ : Dict = size if size is not None else {'height': 256, 'width': 256}
        snake_case_ : Tuple = get_size_dict(_A )
        snake_case_ : str = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        snake_case_ : int = get_size_dict(_A , param_name='crop_size' )
        snake_case_ : Union[str, Any] = do_resize
        snake_case_ : str = size
        snake_case_ : List[str] = resample
        snake_case_ : List[Any] = do_center_crop
        snake_case_ : Dict = crop_size
        snake_case_ : Tuple = do_rescale
        snake_case_ : Optional[Any] = rescale_factor
        snake_case_ : Any = do_normalize
        snake_case_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCAmelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PIL.Image.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray:
        """simple docstring"""
        snake_case_ : Tuple = get_size_dict(_A )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return resize(
            _A , size=(size['height'], size['width']) , resample=_A , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray:
        """simple docstring"""
        snake_case_ : Optional[int] = get_size_dict(_A )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : Dict , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ) -> str:
        """simple docstring"""
        return rescale(_A , scale=_A , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray:
        """simple docstring"""
        return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : List[str] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : Union[str, Any]=None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : int , ) -> PIL.Image.Image:
        """simple docstring"""
        snake_case_ : int = do_resize if do_resize is not None else self.do_resize
        snake_case_ : str = resample if resample is not None else self.resample
        snake_case_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
        snake_case_ : Dict = image_std if image_std is not None else self.image_std
        snake_case_ : int = size if size is not None else self.size
        snake_case_ : Optional[int] = get_size_dict(_A )
        snake_case_ : int = crop_size if crop_size is not None else self.crop_size
        snake_case_ : Any = get_size_dict(_A , param_name='crop_size' )
        snake_case_ : Optional[Any] = make_list_of_images(_A )
        if not valid_images(_A ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        snake_case_ : Optional[Any] = [to_numpy_array(_A ) for image in images]
        if do_resize:
            snake_case_ : Dict = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
        if do_center_crop:
            snake_case_ : Optional[Any] = [self.center_crop(image=_A , size=_A ) for image in images]
        if do_rescale:
            snake_case_ : Optional[int] = [self.rescale(image=_A , scale=_A ) for image in images]
        if do_normalize:
            snake_case_ : str = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
        snake_case_ : Dict = [to_channel_dimension_format(_A , _A ) for image in images]
        snake_case_ : Tuple = {'pixel_values': images}
        return BatchFeature(data=_A , tensor_type=_A )
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
    __magic_name__: str = ["pixel_values"]

    def __init__( self : Dict , _A : bool = True , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Tuple , ) -> None:
        """simple docstring"""
        super().__init__(**_A )
        snake_case_ : Any = size if size is not None else {'shortest_edge': 256}
        snake_case_ : Union[str, Any] = get_size_dict(_A , default_to_square=_A )
        snake_case_ : Any = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        snake_case_ : int = get_size_dict(_A )
        snake_case_ : int = do_resize
        snake_case_ : List[Any] = size
        snake_case_ : List[str] = resample
        snake_case_ : List[Any] = do_center_crop
        snake_case_ : int = crop_size
        snake_case_ : List[Any] = do_rescale
        snake_case_ : List[str] = rescale_factor
        snake_case_ : Any = do_normalize
        snake_case_ : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case_ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def UpperCAmelCase_ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray:
        """simple docstring"""
        snake_case_ : Tuple = get_size_dict(_A , default_to_square=_A )
        if "shortest_edge" not in size:
            raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        snake_case_ : Optional[Any] = get_resize_output_image_size(_A , size=size['shortest_edge'] , default_to_square=_A )
        return resize(_A , size=_A , resample=_A , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray:
        """simple docstring"""
        snake_case_ : Dict = get_size_dict(_A )
        return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : List[str] , _A : np.ndarray , _A : float , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any ) -> np.ndarray:
        """simple docstring"""
        return rescale(_A , scale=_A , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : List[str] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ) -> np.ndarray:
        """simple docstring"""
        return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )

    def UpperCAmelCase_ ( self : Any , _A : ImageInput , _A : Optional[bool] = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_A : Dict , ) -> int:
        """simple docstring"""
        snake_case_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
        snake_case_ : Dict = size if size is not None else self.size
        snake_case_ : str = get_size_dict(_A , default_to_square=_A )
        snake_case_ : Optional[Any] = resample if resample is not None else self.resample
        snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case_ : List[str] = crop_size if crop_size is not None else self.crop_size
        snake_case_ : Optional[Any] = get_size_dict(_A )
        snake_case_ : int = do_rescale if do_rescale is not None else self.do_rescale
        snake_case_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        snake_case_ : int = image_mean if image_mean is not None else self.image_mean
        snake_case_ : Tuple = image_std if image_std is not None else self.image_std
        snake_case_ : str = make_list_of_images(_A )
        if not valid_images(_A ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        snake_case_ : List[str] = [to_numpy_array(_A ) for image in images]
        if do_resize:
            snake_case_ : Dict = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
        if do_center_crop:
            snake_case_ : Dict = [self.center_crop(image=_A , size=_A ) for image in images]
        if do_rescale:
            snake_case_ : List[Any] = [self.rescale(image=_A , scale=_A ) for image in images]
        if do_normalize:
            snake_case_ : List[Any] = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
        snake_case_ : Dict = [to_channel_dimension_format(_A , _A ) for image in images]
        snake_case_ : Optional[Any] = {'pixel_values': images}
        return BatchFeature(data=_A , tensor_type=_A )
import sys

_SCREAMING_SNAKE_CASE = (
    """73167176531330624919225119674426574742355349194934"""
    """96983520312774506326239578318016984801869478851843"""
    """85861560789112949495459501737958331952853208805511"""
    """12540698747158523863050715693290963295227443043557"""
    """66896648950445244523161731856403098711121722383113"""
    """62229893423380308135336276614282806444486645238749"""
    """30358907296290491560440772390713810515859307960866"""
    """70172427121883998797908792274921901699720888093776"""
    """65727333001053367881220235421809751254540594752243"""
    """52584907711670556013604839586446706324415722155397"""
    """53697817977846174064955149290862569321978468622482"""
    """83972241375657056057490261407972968652414535100474"""
    """82166370484403199890008895243450658541227588666881"""
    """16427171479924442928230863465674813919123162824586"""
    """17866458359124566529476545682848912883142607690042"""
    """24219022671055626321111109370544217506941658960408"""
    """07198403850962455444362981230987879927244284909188"""
    """84580156166097919133875499200524063689912560717606"""
    """05886116467109405077541002256983155200055935729725"""
    """71636269561882670428252483600823257530420752963450"""
)


def SCREAMING_SNAKE_CASE__ ( __a = N ):
    snake_case_ : Optional[Any] = -sys.maxsize - 1
    for i in range(len(__a ) - 12 ):
        snake_case_ : Optional[Any] = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            snake_case_ : int = product
    return largest_product


if __name__ == "__main__":
    print(F'''{solution() = }''')
from __future__ import annotations


def SCREAMING_SNAKE_CASE__ ( __a ):
    if not nums:
        raise ValueError('List is empty' )
    return sum(__a ) / len(__a )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeqaSeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    SeqaSeqDataCollator,
    SeqaSeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)


@dataclass
class SCREAMING_SNAKE_CASE_ :
    __magic_name__: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    __magic_name__: Optional[str] = field(
        default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    __magic_name__: Optional[str] = field(
        default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    __magic_name__: Optional[str] = field(
        default=snake_case_ ,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,
    )
    __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the encoder."} )
    __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the embeddings."} )


@dataclass
class SCREAMING_SNAKE_CASE_ :
    __magic_name__: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    __magic_name__: Optional[str] = field(
        default="summarization" ,
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} ,
    )
    __magic_name__: Optional[int] = field(
        default=1024 ,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,
    )
    __magic_name__: Optional[int] = field(
        default=128 ,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,
    )
    __magic_name__: Optional[int] = field(
        default=142 ,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } ,
    )
    __magic_name__: Optional[int] = field(
        default=142 ,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,
    )
    __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
    __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
    __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
    __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Source language id for translation."} )
    __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Target language id for translation."} )
    __magic_name__: Optional[int] = field(default=snake_case_ , metadata={"help": "# num_beams to use for evaluation."} )
    __magic_name__: bool = field(
        default=snake_case_ ,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} ,
    )


def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ):
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f""" {key} = {metrics[key]}""" )
    save_json(__a , os.path.join(__a , f"""{split}_results.json""" ) )


def SCREAMING_SNAKE_CASE__ ( ):
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    snake_case_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        snake_case_ ,snake_case_ ,snake_case_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        snake_case_ ,snake_case_ ,snake_case_ : List[str] = parser.parse_args_into_dataclasses()
    check_output_dir(__a )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,
        datefmt='%m/%d/%Y %H:%M:%S' ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,
    )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,
        training_args.fpaa ,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' , __a )

    # Set seed
    set_seed(training_args.seed )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    snake_case_ : Tuple = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
    )
    snake_case_ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(__a , __a , __a ):
            assert hasattr(__a , __a ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(__a , __a , getattr(__a , __a ) )
    snake_case_ : Tuple = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
    )
    snake_case_ : Any = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path ,
        from_tf='.ckpt' in model_args.model_name_or_path ,
        config=__a ,
        cache_dir=model_args.cache_dir ,
    )

    # use task specific params
    use_task_specific_params(__a , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        snake_case_ : Any = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(__a , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(__a , __a ):
            snake_case_ : int = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            snake_case_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )

    if model_args.freeze_embeds:
        freeze_embeds(__a )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )

    snake_case_ : List[Any] = SeqaSeqDataset

    # Get datasets
    snake_case_ : List[Any] = (
        dataset_class(
            __a ,
            type_path='train' ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_train ,
            max_target_length=data_args.max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or '' ,
        )
        if training_args.do_train
        else None
    )
    snake_case_ : List[str] = (
        dataset_class(
            __a ,
            type_path='val' ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_val ,
            max_target_length=data_args.val_max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or '' ,
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    snake_case_ : List[Any] = (
        dataset_class(
            __a ,
            type_path='test' ,
            data_dir=data_args.data_dir ,
            n_obs=data_args.n_test ,
            max_target_length=data_args.test_max_target_length ,
            max_source_length=data_args.max_source_length ,
            prefix=model.config.prefix or '' ,
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    snake_case_ : Any = (
        build_compute_metrics_fn(data_args.task , __a ) if training_args.predict_with_generate else None
    )
    snake_case_ : List[str] = SeqaSeqTrainer(
        model=__a ,
        args=__a ,
        data_args=__a ,
        train_dataset=__a ,
        eval_dataset=__a ,
        data_collator=SeqaSeqDataCollator(
            __a , __a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) ,
        compute_metrics=__a ,
        tokenizer=__a ,
    )

    snake_case_ : Optional[int] = {}
    # Training
    if training_args.do_train:
        logger.info('*** Train ***' )
        snake_case_ : Any = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        snake_case_ : Tuple = train_result.metrics
        snake_case_ : List[str] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('train' , __a , training_args.output_dir )
            all_metrics.update(__a )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        snake_case_ : List[Any] = trainer.evaluate(metric_key_prefix='val' )
        snake_case_ : str = data_args.n_val
        snake_case_ : Union[str, Any] = round(metrics['val_loss'] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('val' , __a , training_args.output_dir )
            all_metrics.update(__a )

    if training_args.do_predict:
        logger.info('*** Predict ***' )
        snake_case_ : Dict = trainer.predict(test_dataset=__a , metric_key_prefix='test' )
        snake_case_ : Union[str, Any] = test_output.metrics
        snake_case_ : int = data_args.n_test
        if trainer.is_world_process_zero():
            snake_case_ : List[str] = round(metrics['test_loss'] , 4 )
            handle_metrics('test' , __a , training_args.output_dir )
            all_metrics.update(__a )
            if training_args.predict_with_generate:
                snake_case_ : Any = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
                snake_case_ : Any = lmap(str.strip , __a )
                write_txt_file(__a , os.path.join(training_args.output_dir , 'test_generations.txt' ) )

    if trainer.is_world_process_zero():
        save_json(__a , os.path.join(training_args.output_dir , 'all_results.json' ) )
    return all_metrics


def SCREAMING_SNAKE_CASE__ ( __a ):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
327
1
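The fine-tuning script above leans on a handful of small helpers (lmap, save_json, write_txt_file, handle_metrics) imported from the example's local utils module. A minimal sketch of what those helpers could look like, as an illustrative reconstruction rather than the canonical implementations:

import json
import os
from logging import getLogger

logger = getLogger(__name__)


def lmap(f, x):
    """list(map(f, x)) -- materialize the map eagerly."""
    return list(map(f, x))


def save_json(content, path, indent=4):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent)


def write_txt_file(ordered_lines, path):
    with open(path, "w") as f:
        for line in ordered_lines:
            f.write(line + "\n")


def handle_metrics(split, metrics, output_dir):
    """Log the metrics and save them to `{split}_results.json` in output_dir."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))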
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = { """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""], """tokenization_convbert""": ["""ConvBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["""ConvBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvBertForMaskedLM""", """ConvBertForMultipleChoice""", """ConvBertForQuestionAnswering""", """ConvBertForSequenceClassification""", """ConvBertForTokenClassification""", """ConvBertLayer""", """ConvBertModel""", """ConvBertPreTrainedModel""", """load_tf_weights_in_convbert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFConvBertForMaskedLM""", """TFConvBertForMultipleChoice""", """TFConvBertForQuestionAnswering""", """TFConvBertForSequenceClassification""", """TFConvBertForTokenClassification""", """TFConvBertLayer""", """TFConvBertModel""", """TFConvBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
327
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _SCREAMING_SNAKE_CASE = { """configuration_poolformer""": [ """POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PoolFormerConfig""", """PoolFormerOnnxConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["""PoolFormerFeatureExtractor"""] _SCREAMING_SNAKE_CASE = ["""PoolFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PoolFormerForImageClassification""", """PoolFormerModel""", """PoolFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
327
1
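Both __init__ modules above defer their heavy imports through _LazyModule. A simplified, generic sketch of that lazy-import pattern (an assumption about the mechanism; the real transformers._LazyModule also handles module specs and failed-import bookkeeping):

import importlib
from types import ModuleType


class LazyModule(ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        # Import the defining submodule only on first attribute access.
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value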
import math

# Definition names below (fx, fx_derivative, get_initial_point) are
# reconstructed from their call sites; the source anonymized them. The
# top-level solver's original name is not recoverable, so
# square_root_iterative is an editorial placeholder.


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 99_99, tolerance: float = 0.00000000000001) -> float:
    if a < 0:
        raise ValueError('math domain error')
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
327
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : Dict ) -> List[Any]: """simple docstring""" snake_case_ : Any = 'hf-internal-testing/tiny-random-t5' snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(_A ) snake_case_ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_A ) snake_case_ : List[Any] = tokenizer('This is me' , return_tensors='pt' ) snake_case_ : Any = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) snake_case_ : Optional[Any] = model.generate(**_A ) snake_case_ : int = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) snake_case_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_A ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) snake_case_ : Optional[Any] = model_reloaded.generate(**_A ) self.assertTrue(torch.allclose(_A , _A ) ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" snake_case_ : Any = 'hf-internal-testing/tiny-random-t5' snake_case_ : int = AutoModelForSeqaSeqLM.from_pretrained(_A ) snake_case_ : Dict = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(_A ): model.save_pretrained(_A ) snake_case_ : Union[str, Any] = model.reverse_bettertransformer() model.save_pretrained(_A )
327
1
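The square-root routine above applies the Newton-Raphson update x_{n+1} = x_n - f(x_n)/f'(x_n) with f(x) = x^2 - a. A compact, self-contained sketch of just that update (hypothetical names; assumes a >= 0):

import math


def newton_sqrt(a: float, tolerance: float = 1e-12) -> float:
    x = max(a, 1.0)  # any positive starting point >= sqrt(a) works
    while True:
        nxt = x - (x * x - a) / (2 * x)  # x - f(x) / f'(x)
        if abs(nxt - x) < tolerance:
            return nxt
        x = nxt


assert math.isclose(newton_sqrt(2.0), math.sqrt(2.0), rel_tol=1e-9)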
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""") class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : Optional[int] ) -> str: """simple docstring""" snake_case_ : Any = mock.Mock() snake_case_ : Dict = 500 snake_case_ : Optional[int] = {} snake_case_ : List[str] = HTTPError snake_case_ : Tuple = {} # Download this model to make sure it's in the cache. snake_case_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' , return_value=_A ) as mock_head: snake_case_ : Any = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # This check we did call the fake head request mock_head.assert_called() def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' ) @is_staging_test class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @classmethod def UpperCAmelCase_ ( cls : str ) -> Optional[int]: """simple docstring""" snake_case_ : Union[str, Any] = TOKEN HfFolder.save_token(_A ) @classmethod def UpperCAmelCase_ ( cls : int ) -> Optional[int]: """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-feature-extractor' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' ) except HTTPError: pass def UpperCAmelCase_ ( self : List[str] ) -> List[str]: """simple docstring""" snake_case_ : Dict = WavaVecaFeatureExtractor.from_pretrained(_A ) feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token ) snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_A , getattr(_A , _A ) ) # Reset repo delete_repo(token=self._token , repo_id='test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( _A , repo_id='test-feature-extractor' , push_to_hub=_A , use_auth_token=self._token ) snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_A , getattr(_A , _A ) ) def UpperCAmelCase_ ( self : List[str] ) -> Dict: """simple docstring""" snake_case_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(_A ) feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token ) snake_case_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_A , getattr(_A , _A ) ) # Reset 
repo delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( _A , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=_A , use_auth_token=self._token ) snake_case_ : str = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(_A , getattr(_A , _A ) ) def UpperCAmelCase_ ( self : Optional[int] ) -> Any: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() snake_case_ : str = CustomFeatureExtractor.from_pretrained(_A ) feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , ) snake_case_ : int = AutoFeatureExtractor.from_pretrained( F"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=_A ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
327
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def __init__( self : Union[str, Any] , _A : Any , _A : Dict ) -> Union[str, Any]: """simple docstring""" snake_case_ : str = params snake_case_ : int = np.array(_A ) snake_case_ : Optional[int] = np.array([len(_A ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Tuple , _A : Optional[int] ) -> str: """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self : List[str] ) -> str: """simple docstring""" return len(self.lengths ) def UpperCAmelCase_ ( self : Dict ) -> str: """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: """simple docstring""" snake_case_ : Dict = self.params.max_model_input_size snake_case_ : Tuple = self.lengths > max_len logger.info(F"""Splitting {sum(_A )} too long sequences.""" ) def divide_chunks(_A : Union[str, Any] , _A : Dict ): return [l[i : i + n] for i in range(0 , len(_A ) , _A )] snake_case_ : Dict = [] snake_case_ : Union[str, Any] = [] if self.params.mlm: snake_case_ ,snake_case_ : Optional[int] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: snake_case_ ,snake_case_ : Any = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: snake_case_ : List[Any] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: snake_case_ : Optional[int] = np.insert(_A , 0 , _A ) if sub_s[-1] != sep_id: snake_case_ : Optional[Any] = np.insert(_A , len(_A ) , _A ) assert len(_A ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(_A ) new_tok_ids.extend(_A ) new_lengths.extend([len(_A ) for l in sub_seqs] ) snake_case_ : Tuple = np.array(_A ) snake_case_ : int = np.array(_A ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: """simple docstring""" snake_case_ : Tuple = len(self ) snake_case_ : int = self.lengths > 11 snake_case_ : Dict = self.token_ids[indices] snake_case_ : int = self.lengths[indices] snake_case_ : List[Any] = len(self ) logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: snake_case_ : Optional[Any] = self.params.special_tok_ids['unk_token'] snake_case_ : Dict = len(self ) snake_case_ : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) snake_case_ : Any = (unk_occs / self.lengths) < 0.5 snake_case_ : List[Any] = self.token_ids[indices] snake_case_ : int = self.lengths[indices] snake_case_ : Tuple = len(self ) logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" if not self.params.is_master: return logger.info(F"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # 
logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCAmelCase_ ( self : Optional[int] , _A : Union[str, Any] ) -> List[Any]: """simple docstring""" snake_case_ : Any = [t[0] for t in batch] snake_case_ : int = [t[1] for t in batch] assert len(_A ) == len(_A ) # Max for paddings snake_case_ : str = max(_A ) # Pad token ids if self.params.mlm: snake_case_ : int = self.params.special_tok_ids['pad_token'] else: snake_case_ : Dict = self.params.special_tok_ids['unk_token'] snake_case_ : Dict = [list(t.astype(_A ) ) + [pad_idx] * (max_seq_len_ - len(_A )) for t in token_ids] assert len(tk_ ) == len(_A ) assert all(len(_A ) == max_seq_len_ for t in tk_ ) snake_case_ : Any = torch.tensor(tk_ ) # (bs, max_seq_len_) snake_case_ : Optional[Any] = torch.tensor(_A ) # (bs) return tk_t, lg_t
327
1
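The distillation dataset above splits over-long token sequences with divide_chunks and re-attaches the boundary tokens ([CLS]/[SEP] or BOS/EOS) to each piece. A simplified, self-contained variant of that step (the original inserts boundaries conditionally; this sketch always strips and re-adds them):

def divide_chunks(seq, n):
    return [seq[i : i + n] for i in range(0, len(seq), n)]


def split_with_boundaries(seq, max_len, cls_id, sep_id):
    chunks = []
    for sub in divide_chunks(seq[1:-1], max_len - 2):  # strip old boundary tokens
        chunks.append([cls_id] + sub + [sep_id])       # re-attach them per chunk
    return chunks


print(split_with_boundaries([101, 5, 6, 7, 8, 9, 102], max_len=5, cls_id=101, sep_id=102))
# [[101, 5, 6, 7, 102], [101, 8, 9, 102]]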
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { """configuration_time_series_transformer""": [ """TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimeSeriesTransformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TimeSeriesTransformerForPrediction""", """TimeSeriesTransformerModel""", """TimeSeriesTransformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
327
# Definition names (euclidean_gcd, euclidean_gcd_recursive, main) are
# reconstructed from the call sites below; the source anonymized them.
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
327
1
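A quick self-contained check that the Euclidean algorithm above agrees with the standard library's gcd on a few pairs:

from math import gcd as stdlib_gcd


def euclid(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


for a, b in [(3, 5), (12, 18), (270, 192), (7, 0)]:
    assert euclid(a, b) == stdlib_gcd(a, b)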
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _SCREAMING_SNAKE_CASE = { """configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""], """tokenization_perceiver""": ["""PerceiverTokenizer"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["""PerceiverFeatureExtractor"""] _SCREAMING_SNAKE_CASE = ["""PerceiverImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PerceiverForImageClassificationConvProcessing""", """PerceiverForImageClassificationFourier""", """PerceiverForImageClassificationLearned""", """PerceiverForMaskedLM""", """PerceiverForMultimodalAutoencoding""", """PerceiverForOpticalFlow""", """PerceiverForSequenceClassification""", """PerceiverLayer""", """PerceiverModel""", """PerceiverPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
327
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _SCREAMING_SNAKE_CASE = get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ): os.makedirs(__a , exist_ok=__a ) with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : Dict = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" snake_case_ : Dict = os.path.join(__a , __a ) if accelerator.process_index == 0: logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__a , __a ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Dict = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) snake_case_ : Dict = os.path.join(__a , __a ) logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__a , __a ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : Optional[int] = os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" ) os.makedirs(__a , exist_ok=__a ) logger.info(f"""Saving model to {ckpt_dir}""" ) snake_case_ : int = {'model': state_dict} dist_cp.save_state_dict( state_dict=__a , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , ) logger.info(f"""Model saved to {ckpt_dir}""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__a ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" snake_case_ : Optional[Any] = os.path.join(__a , __a ) logger.info(f"""Loading model from {input_model_file}""" ) snake_case_ : Optional[Any] = torch.load(__a ) logger.info(f"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Optional[Any] = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) snake_case_ : Tuple = os.path.join(__a , __a ) logger.info(f"""Loading model from {input_model_file}""" ) snake_case_ : Optional[int] = torch.load(__a ) logger.info(f"""Model loaded from {input_model_file}""" ) 
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : Tuple = ( os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" ) if f"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading model from {ckpt_dir}""" ) snake_case_ : List[Any] = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=__a , storage_reader=dist_cp.FileSystemReader(__a ) , planner=DefaultLoadPlanner() , ) snake_case_ : Any = state_dict['model'] logger.info(f"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(__a ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ): os.makedirs(__a , exist_ok=__a ) with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : List[str] = FSDP.optim_state_dict(__a , __a ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: snake_case_ : str = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) snake_case_ : Any = os.path.join(__a , __a ) logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(__a , __a ) logger.info(f"""Optimizer state saved in {output_optimizer_file}""" ) else: snake_case_ : Optional[int] = os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(__a , exist_ok=__a ) logger.info(f"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , ) logger.info(f"""Optimizer state saved in {ckpt_dir}""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : Optional[Any] = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: snake_case_ : Union[str, Any] = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) snake_case_ : List[Any] = os.path.join(__a , __a ) logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" ) snake_case_ : Optional[int] = torch.load(__a ) logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" ) else: snake_case_ : str = ( os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if f"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading Optimizer from {ckpt_dir}""" ) snake_case_ : Any = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__a ) , ) snake_case_ : Optional[int] = optim_state['optimizer'] logger.info(f"""Optimizer loaded from {ckpt_dir}""" ) snake_case_ : Optional[Any] = FSDP.optim_state_dict_to_load(__a , __a , __a ) optimizer.load_state_dict(__a )
327
1
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def SCREAMING_SNAKE_CASE__ ( __a ): snake_case_ : List[Any] = fname.split(os.path.sep )[-1] return re.search(r'^(.*)_\d+\.jpg$' , __a ).groups()[0] class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def __init__( self : List[str] , _A : str , _A : Union[str, Any]=None , _A : str=None ) -> str: """simple docstring""" snake_case_ : int = file_names snake_case_ : List[str] = image_transform snake_case_ : int = label_to_id def __len__( self : int ) -> Optional[Any]: """simple docstring""" return len(self.file_names ) def __getitem__( self : List[Any] , _A : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" snake_case_ : Union[str, Any] = self.file_names[idx] snake_case_ : Optional[int] = PIL.Image.open(_A ) snake_case_ : int = raw_image.convert('RGB' ) if self.image_transform is not None: snake_case_ : Any = self.image_transform(_A ) snake_case_ : List[str] = extract_label(_A ) if self.label_to_id is not None: snake_case_ : Optional[int] = self.label_to_id[label] return {"image": image, "label": label} def SCREAMING_SNAKE_CASE__ ( __a , __a ): # Initialize accelerator if args.with_tracking: snake_case_ : str = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir ) else: snake_case_ : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ : Optional[Any] = config['lr'] snake_case_ : Optional[int] = int(config['num_epochs'] ) snake_case_ : int = int(config['seed'] ) snake_case_ : Union[str, Any] = int(config['batch_size'] ) snake_case_ : List[str] = config['image_size'] if not isinstance(__a , (list, tuple) ): snake_case_ : int = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , 'isdigit' ): if args.checkpointing_steps == "epoch": snake_case_ : str = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): snake_case_ : Tuple = int(args.checkpointing_steps ) else: raise ValueError( f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: snake_case_ : Tuple = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: snake_case_ : Optional[int] = os.path.split(__a )[-1].split('.' )[0] accelerator.init_trackers(__a , __a ) # Grab all the image filenames snake_case_ : Optional[Any] = [os.path.join(args.data_dir , __a ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )] # Build the label correspondences snake_case_ : Dict = [extract_label(__a ) for fname in file_names] snake_case_ : Any = list(set(__a ) ) id_to_label.sort() snake_case_ : Any = {lbl: i for i, lbl in enumerate(__a )} # Set the seed before splitting the data. 
np.random.seed(__a ) torch.manual_seed(__a ) torch.cuda.manual_seed_all(__a ) # Split our filenames between train and validation snake_case_ : Optional[Any] = np.random.permutation(len(__a ) ) snake_case_ : Dict = int(0.8 * len(__a ) ) snake_case_ : Dict = random_perm[:cut] snake_case_ : Any = random_perm[cut:] # For training we use a simple RandomResizedCrop snake_case_ : int = Compose([RandomResizedCrop(__a , scale=(0.5, 1.0) ), ToTensor()] ) snake_case_ : Tuple = PetsDataset( [file_names[i] for i in train_split] , image_transform=__a , label_to_id=__a ) # For evaluation, we use a deterministic Resize snake_case_ : Optional[int] = Compose([Resize(__a ), ToTensor()] ) snake_case_ : List[str] = PetsDataset([file_names[i] for i in eval_split] , image_transform=__a , label_to_id=__a ) # Instantiate dataloaders. snake_case_ : Optional[Any] = DataLoader(__a , shuffle=__a , batch_size=__a , num_workers=4 ) snake_case_ : Optional[int] = DataLoader(__a , shuffle=__a , batch_size=__a , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ : Dict = create_model('resnet50d' , pretrained=__a , num_classes=len(__a ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). snake_case_ : List[str] = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): snake_case_ : Optional[int] = False for param in model.get_classifier().parameters(): snake_case_ : str = True # We normalize the batches of images to be a bit faster. snake_case_ : str = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device ) snake_case_ : Union[str, Any] = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer snake_case_ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler snake_case_ : List[str] = OneCycleLR(optimizer=__a , max_lr=__a , epochs=__a , steps_per_epoch=len(__a ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ : List[str] = accelerator.prepare( __a , __a , __a , __a , __a ) # We need to keep track of how many total steps we have iterated over snake_case_ : str = 0 # We also need to keep track of the starting epoch so files are named properly snake_case_ : Dict = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) snake_case_ : List[Any] = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint snake_case_ : List[Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) snake_case_ : Dict = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` snake_case_ : Optional[Any] = os.path.splitext(__a )[0] if "epoch" in training_difference: snake_case_ : Tuple = int(training_difference.replace('epoch_' , '' ) ) + 1 snake_case_ : Any = None else: snake_case_ : Optional[Any] = int(training_difference.replace('step_' , '' ) ) snake_case_ : str = resume_step // len(__a ) resume_step -= starting_epoch * len(__a ) # Now we train the model for epoch in range(__a , __a ): model.train() if args.with_tracking: snake_case_ : Union[str, Any] = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step snake_case_ : List[Any] = accelerator.skip_first_batches(__a , __a ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader snake_case_ : List[str] = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. snake_case_ : Tuple = {k: v.to(accelerator.device ) for k, v in batch.items()} snake_case_ : Union[str, Any] = (batch['image'] - mean) / std snake_case_ : Optional[int] = model(__a ) snake_case_ : Optional[Any] = torch.nn.functional.cross_entropy(__a , batch['label'] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(__a ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(__a , __a ): snake_case_ : Tuple = f"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: snake_case_ : Optional[Any] = os.path.join(args.output_dir , __a ) accelerator.save_state(__a ) model.eval() snake_case_ : str = 0 snake_case_ : int = 0 for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. snake_case_ : Optional[int] = {k: v.to(accelerator.device ) for k, v in batch.items()} snake_case_ : str = (batch['image'] - mean) / std with torch.no_grad(): snake_case_ : Tuple = model(__a ) snake_case_ : int = outputs.argmax(dim=-1 ) snake_case_ ,snake_case_ : Any = accelerator.gather_for_metrics((predictions, batch['label']) ) snake_case_ : Dict = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() snake_case_ : Optional[int] = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(f"""epoch {epoch}: {1_00 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { 'accuracy': 1_00 * eval_metric, 'train_loss': total_loss.item() / len(__a ), 'epoch': epoch, } , step=__a , ) if checkpointing_steps == "epoch": snake_case_ : List[str] = f"""epoch_{epoch}""" if args.output_dir is not None: snake_case_ : Any = os.path.join(args.output_dir , __a ) accelerator.save_state(__a ) if args.with_tracking: accelerator.end_training() def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : List[Any] = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument('--data_dir' , required=__a , help='The data folder on disk.' ) parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' ) parser.add_argument( '--mixed_precision' , type=__a , default=__a , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) parser.add_argument( '--checkpointing_steps' , type=__a , default=__a , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , ) parser.add_argument( '--output_dir' , type=__a , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--resume_from_checkpoint' , type=__a , default=__a , help='If the training should continue from a checkpoint folder.' , ) parser.add_argument( '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , ) parser.add_argument( '--project_dir' , type=__a , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , ) snake_case_ : Dict = parser.parse_args() snake_case_ : str = {'lr': 3E-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 2_24} training_function(__a , __a ) if __name__ == "__main__": main()
327
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : int = 32 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , _A : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , _A : bool = True , _A : Tuple=7 , _A : Tuple=30 , _A : int=400 , _A : Tuple=3 , ) -> Optional[int]: """simple docstring""" snake_case_ : str = parent snake_case_ : str = do_resize snake_case_ : str = size if size is not None else {'shortest_edge': 288} snake_case_ : Any = size_divisor snake_case_ : Any = do_rescale snake_case_ : Union[str, Any] = rescale_factor snake_case_ : str = do_normalize snake_case_ : int = do_center_crop snake_case_ : str = image_mean snake_case_ : int = image_std snake_case_ : Any = do_pad snake_case_ : Optional[int] = batch_size snake_case_ : List[str] = num_channels snake_case_ : Any = min_resolution snake_case_ : str = max_resolution def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def UpperCAmelCase_ ( self : Dict , _A : str , _A : Union[str, Any]=False ) -> int: """simple docstring""" if not batched: snake_case_ : Optional[int] = self.size['shortest_edge'] snake_case_ : List[Any] = image_inputs[0] if isinstance(_A , Image.Image ): snake_case_ ,snake_case_ : Optional[Any] = image.size else: snake_case_ ,snake_case_ : str = image.shape[1], image.shape[2] snake_case_ : Dict = size / min(_A , _A ) if h < w: snake_case_ ,snake_case_ : str = size, scale * w else: snake_case_ ,snake_case_ : Tuple = scale * h, size snake_case_ : Dict = int((1333 / 800) * size ) if max(_A , _A ) > max_size: snake_case_ : Union[str, Any] = max_size / max(_A , _A ) snake_case_ : Any = newh * scale snake_case_ : Union[str, Any] = neww * scale snake_case_ ,snake_case_ : Any = int(newh + 0.5 ), int(neww + 0.5 ) snake_case_ ,snake_case_ : int = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: snake_case_ : Optional[int] = [] for image in image_inputs: snake_case_ ,snake_case_ : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case_ : str = max(_A , key=lambda _A : item[0] )[0] snake_case_ : List[str] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ): __magic_name__: List[Any] = BridgeTowerImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: """simple docstring""" snake_case_ : int = BridgeTowerImageProcessingTester(self ) @property def UpperCAmelCase_ ( self : int ) -> Any: """simple 
docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , 'image_mean' ) ) self.assertTrue(hasattr(_A , 'image_std' ) ) self.assertTrue(hasattr(_A , 'do_normalize' ) ) self.assertTrue(hasattr(_A , 'do_resize' ) ) self.assertTrue(hasattr(_A , 'size' ) ) self.assertTrue(hasattr(_A , 'size_divisor' ) ) def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]: """simple docstring""" pass def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : List[str] = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Tuple = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : Any = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : str = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : 
Tuple = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
327
1
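get_expected_values in the image-processor test above mirrors a shortest-edge resize rule: scale the shorter side to size, cap the longer side at (1333/800) * size, then floor both sides to a multiple of size_divisor. The same computation as a standalone function (illustrative; names are hypothetical):

def expected_shape(h: int, w: int, size: int = 288, size_divisor: int = 32) -> tuple:
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor


print(expected_shape(400, 600))  # (288, 416)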
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Optional[Any] = ["pixel_values"] def __init__( self : str , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PIL.Image.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : str , ) -> None: """simple docstring""" super().__init__(**_A ) snake_case_ : Dict = size if size is not None else {'height': 256, 'width': 256} snake_case_ : Tuple = get_size_dict(_A ) snake_case_ : str = crop_size if crop_size is not None else {'height': 224, 'width': 224} snake_case_ : int = get_size_dict(_A , param_name='crop_size' ) snake_case_ : Union[str, Any] = do_resize snake_case_ : str = size snake_case_ : List[str] = resample snake_case_ : List[Any] = do_center_crop snake_case_ : Dict = crop_size snake_case_ : Tuple = do_rescale snake_case_ : Optional[Any] = rescale_factor snake_case_ : Any = do_normalize snake_case_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PIL.Image.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray: """simple docstring""" snake_case_ : Tuple = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" ) return resize( _A , size=(size['height'], size['width']) , resample=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray: """simple docstring""" snake_case_ : Optional[int] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A ) def UpperCAmelCase_ ( self : Dict , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ) -> str: """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : List[str] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : Union[str, Any]=None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : int , ) -> PIL.Image.Image: """simple docstring""" snake_case_ : int = do_resize if do_resize is not None else self.do_resize snake_case_ : str = resample if resample is not None else self.resample snake_case_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : Any = image_mean if image_mean is not None else self.image_mean snake_case_ : Dict = image_std if image_std is not None else self.image_std snake_case_ : int = size if size is not None else self.size snake_case_ : Optional[int] = get_size_dict(_A ) snake_case_ : int = crop_size if crop_size is not None else self.crop_size snake_case_ : Any = get_size_dict(_A , param_name='crop_size' ) snake_case_ : Optional[Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case_ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: snake_case_ : Dict = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_center_crop: snake_case_ : Optional[Any] = [self.center_crop(image=_A , size=_A ) for image in images] if do_rescale: snake_case_ : Optional[int] = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: snake_case_ : str = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] snake_case_ : Dict = [to_channel_dimension_format(_A , _A ) for image in images] snake_case_ : Tuple = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
327
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration _SCREAMING_SNAKE_CASE = 50_00_00 _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = os.path.split(__file__) _SCREAMING_SNAKE_CASE = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def SCREAMING_SNAKE_CASE__ ( __a , **__a ): snake_case_ : int = dataset.map(**__a ) @get_duration def SCREAMING_SNAKE_CASE__ ( __a , **__a ): snake_case_ : Dict = dataset.filter(**__a ) def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : Tuple = {'num examples': SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: snake_case_ : Dict = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} ) snake_case_ : List[Any] = generate_example_dataset( os.path.join(__a , 'dataset.arrow' ) , __a , num_examples=__a ) snake_case_ : str = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=__a ) def tokenize(__a ): return tokenizer(examples['text'] ) snake_case_ : Any = map(__a ) snake_case_ : Tuple = map(__a , batched=__a ) snake_case_ : str = map(__a , function=lambda __a : None , batched=__a ) with dataset.formatted_as(type='numpy' ): snake_case_ : Optional[int] = map(__a , function=lambda __a : None , batched=__a ) with dataset.formatted_as(type='pandas' ): snake_case_ : str = map(__a , function=lambda __a : None , batched=__a ) with dataset.formatted_as(type='torch' , columns='numbers' ): snake_case_ : int = map(__a , function=lambda __a : None , batched=__a ) with dataset.formatted_as(type='tensorflow' , columns='numbers' ): snake_case_ : List[Any] = map(__a , function=lambda __a : None , batched=__a ) snake_case_ : int = map(__a , function=__a , batched=__a ) snake_case_ : Optional[Any] = filter(__a ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(__a , 'wb' ) as f: f.write(json.dumps(__a ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
327
1
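The benchmark above decorates each measured function with get_duration from its local utils module and stores the return values in a timings dict, so the decorator presumably returns elapsed seconds. A plausible minimal implementation, as an assumption rather than the actual code:

import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds becomes the return value
    return wrapper


@get_duration
def busy_wait(n):
    for _ in range(n):
        pass


print(f"busy_wait took {busy_wait(1_000_000):.4f}s")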
from random import randint, random def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a = False , __a = False , __a = 5 , ): snake_case_ : Union[str, Any] = [[-1] * number_of_cells] # Create a highway without any car snake_case_ : List[str] = 0 snake_case_ : Dict = max(__a , 0 ) while i < number_of_cells: snake_case_ : Dict = ( randint(0 , __a ) if random_speed else initial_speed ) # Place the cars i += ( randint(1 , max_speed * 2 ) if random_frequency else frequency ) # Arbitrary number, may need tuning return highway def SCREAMING_SNAKE_CASE__ ( __a , __a ): snake_case_ : Optional[Any] = 0 snake_case_ : Tuple = highway_now[car_index + 1 :] for cell in range(len(__a ) ): # May need a better name for this if cells[cell] != -1: # If the cell is not empty then return distance # we have the distance we wanted distance += 1 # Here if the car is near the end of the highway return distance + get_distance(__a , -1 ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): snake_case_ : Dict = len(__a ) # Beforce calculations, the highway is empty snake_case_ : int = [-1] * number_of_cells for car_index in range(__a ): if highway_now[car_index] != -1: # Add 1 to the current speed of the car and cap the speed snake_case_ : str = min(highway_now[car_index] + 1 , __a ) # Number of empty cell before the next car snake_case_ : str = get_distance(__a , __a ) - 1 # We can't have the car causing an accident snake_case_ : str = min(next_highway[car_index] , __a ) if random() < probability: # Randomly, a driver will slow down snake_case_ : Union[str, Any] = max(next_highway[car_index] - 1 , 0 ) return next_highway def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a ): snake_case_ : str = len(highway[0] ) for i in range(__a ): snake_case_ : List[Any] = update(highway[i] , __a , __a ) snake_case_ : Tuple = [-1] * number_of_cells for car_index in range(__a ): snake_case_ : List[Any] = next_speeds_calculated[car_index] if speed != -1: # Change the position based on the speed (with % to create the loop) snake_case_ : Tuple = (car_index + speed) % number_of_cells # Commit the change of position snake_case_ : Tuple = speed highway.append(__a ) return highway if __name__ == "__main__": import doctest doctest.testmod()
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple('covid_data', 'cases deaths recovered')


def covid_stats(url: str = 'https://www.worldometers.info/coronavirus/') -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f'{solution() = }')
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _SCREAMING_SNAKE_CASE = { """vocab_file""": { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""", }, """tokenizer_file""": { """unc-nlp/lxmert-base-uncased""": ( """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json""" ), }, } _SCREAMING_SNAKE_CASE = { """unc-nlp/lxmert-base-uncased""": 5_12, } _SCREAMING_SNAKE_CASE = { """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True}, } class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: List[Any] = VOCAB_FILES_NAMES __magic_name__: List[str] = PRETRAINED_VOCAB_FILES_MAP __magic_name__: List[str] = PRETRAINED_INIT_CONFIGURATION __magic_name__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__: Union[str, Any] = LxmertTokenizer def __init__( self : List[str] , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=True , _A : Dict="[UNK]" , _A : Optional[int]="[SEP]" , _A : Dict="[PAD]" , _A : Union[str, Any]="[CLS]" , _A : str="[MASK]" , _A : Tuple=True , _A : Dict=None , **_A : List[Any] , ) -> Optional[int]: """simple docstring""" super().__init__( _A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , ) snake_case_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _A ) != do_lower_case or normalizer_state.get('strip_accents' , _A ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars ): snake_case_ : Tuple = getattr(_A , normalizer_state.pop('type' ) ) snake_case_ : Union[str, Any] = do_lower_case snake_case_ : int = strip_accents snake_case_ : Optional[Any] = tokenize_chinese_chars snake_case_ : List[Any] = normalizer_class(**_A ) snake_case_ : Tuple = do_lower_case def UpperCAmelCase_ ( self : Dict , _A : Any , _A : List[Any]=None ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" snake_case_ : str = [self.sep_token_id] snake_case_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" snake_case_ : Union[str, Any] = self._tokenizer.model.save(_A , name=_A ) return tuple(_A )
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = """▁""" _SCREAMING_SNAKE_CASE = {"""vocab_file""": """prophetnet.tokenizer"""} _SCREAMING_SNAKE_CASE = { """vocab_file""": { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer""" ), } } _SCREAMING_SNAKE_CASE = { """microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False}, } _SCREAMING_SNAKE_CASE = { """microsoft/xprophetnet-large-wiki100-cased""": 5_12, } def SCREAMING_SNAKE_CASE__ ( __a ): snake_case_ : Optional[Any] = collections.OrderedDict() with open(__a , 'r' , encoding='utf-8' ) as reader: snake_case_ : int = reader.readlines() for index, token in enumerate(__a ): snake_case_ : Optional[int] = token.rstrip('\n' ) snake_case_ : List[Any] = index return vocab class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: int = VOCAB_FILES_NAMES __magic_name__: str = PRETRAINED_VOCAB_FILES_MAP __magic_name__: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__: Any = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , _A : Union[str, Any] , _A : Tuple="[SEP]" , _A : List[Any]="[SEP]" , _A : str="[SEP]" , _A : Any="[UNK]" , _A : List[str]="[PAD]" , _A : List[Any]="[CLS]" , _A : int="[MASK]" , _A : Optional[Dict[str, Any]] = None , **_A : Dict , ) -> None: """simple docstring""" snake_case_ : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_A , eos_token=_A , sep_token=_A , unk_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise snake_case_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_A ) ) snake_case_ : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab snake_case_ : Optional[Any] = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4} for i in range(10 ): snake_case_ : Optional[int] = F"""[unused{i}]""" snake_case_ : str = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab snake_case_ : Union[str, Any] = 12 snake_case_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(_A ) def __getstate__( self : Optional[int] ) -> Dict: """simple docstring""" snake_case_ : Any = self.__dict__.copy() snake_case_ : Union[str, Any] = None return state def __setstate__( self : List[str] , _A : Dict ) -> int: """simple docstring""" snake_case_ : Any = d try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): snake_case_ : Optional[int] = {} snake_case_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is None: return ([0] * len(_A )) + [1] return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1] def UpperCAmelCase_ ( self : List[str] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" snake_case_ : Optional[Any] = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def UpperCAmelCase_ ( self : Dict ) -> int: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: """simple docstring""" snake_case_ : int = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase_ ( self : List[Any] , _A : str ) -> str: """simple docstring""" return self.sp_model.encode(_A , out_type=_A ) def UpperCAmelCase_ ( self : Any , _A : Optional[int] ) -> int: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] snake_case_ : Optional[int] = self.sp_model.PieceToId(_A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase_ ( self : Optional[int] , _A : Union[str, Any] ) -> int: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase_ ( self : Tuple , _A : Any ) -> Optional[Any]: """simple docstring""" snake_case_ : List[Any] = ''.join(_A ).replace(_A , ' ' ).strip() return out_string def UpperCAmelCase_ ( self : Tuple , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_A ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ : Union[str, Any] = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + 
VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , 'wb' ) as fi: snake_case_ : Dict = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,) def UpperCAmelCase_ ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.sep_token_id] snake_case_ : Optional[Any] = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself."""
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
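# Quick check (illustrative, not part of the original file; assumes the function
# above is in scope): 5**2 = 25 and 6**2 = 36 both end in the original digits,
# so 5 and 6 are automorphic, while 7**2 = 49 is not.
print(is_automorphic_number(5))  # True
print(is_automorphic_number(6))  # True
print(is_automorphic_number(7))  # False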
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
    'library. You can have a look at this example script for pointers: '
    'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, 'sklearn')
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, 'sklearn')
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        'acc': acc,
        'f1': f1,
        'acc_and_f1': (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, 'sklearn')
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        'pearson': pearson_corr,
        'spearmanr': spearman_corr,
        'corr': (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, 'sklearn')
    assert len(preds) == len(labels), f'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}'
    if task_name == 'cola':
        return {'mcc': matthews_corrcoef(labels, preds)}
    elif task_name == 'sst-2':
        return {'acc': simple_accuracy(preds, labels)}
    elif task_name == 'mrpc':
        return acc_and_f1(preds, labels)
    elif task_name == 'sts-b':
        return pearson_and_spearman(preds, labels)
    elif task_name == 'qqp':
        return acc_and_f1(preds, labels)
    elif task_name == 'mnli':
        return {'mnli/acc': simple_accuracy(preds, labels)}
    elif task_name == 'mnli-mm':
        return {'mnli-mm/acc': simple_accuracy(preds, labels)}
    elif task_name in ('qnli', 'rte', 'wnli', 'hans'):
        return {'acc': simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, 'sklearn')
    if len(preds) != len(labels):
        raise ValueError(f'Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}')
    if task_name == 'xnli':
        return {'acc': simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
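# Usage sketch (illustrative, not part of the original module; the module uses
# package-relative imports, so in practice these helpers are reached through the
# installed transformers package, and sklearn must be available). The prediction
# and label arrays below are made up for the example.
import numpy as np

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(glue_compute_metrics('mrpc', preds, labels))
# {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}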
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_autoformer': [
        'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'AutoformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_autoformer'] = [
        'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AutoformerForPrediction',
        'AutoformerModel',
        'AutoformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import Dict from .base import GenericTensor, Pipeline class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def UpperCAmelCase_ ( self : str , _A : Optional[Any]=None , _A : List[str]=None , _A : Optional[Any]=None , **_A : List[str] ) -> Any: """simple docstring""" if tokenize_kwargs is None: snake_case_ : Optional[Any] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) snake_case_ : int = truncation snake_case_ : Optional[int] = tokenize_kwargs snake_case_ : Dict = {} if return_tensors is not None: snake_case_ : Union[str, Any] = return_tensors return preprocess_params, {}, postprocess_params def UpperCAmelCase_ ( self : Optional[int] , _A : int , **_A : Any ) -> Dict[str, GenericTensor]: """simple docstring""" snake_case_ : Dict = self.framework snake_case_ : Any = self.tokenizer(_A , return_tensors=_A , **_A ) return model_inputs def UpperCAmelCase_ ( self : Optional[Any] , _A : List[str] ) -> int: """simple docstring""" snake_case_ : Tuple = self.model(**_A ) return model_outputs def UpperCAmelCase_ ( self : Union[str, Any] , _A : str , _A : str=False ) -> Any: """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[str] , *_A : Union[str, Any] , **_A : Tuple ) -> List[str]: """simple docstring""" return super().__call__(*_A , **_A )
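# Usage sketch (an assumption: this class is the standard "feature-extraction"
# pipeline task in transformers, so it is normally reached through the pipeline()
# factory rather than instantiated directly; the model name is only an example).
from transformers import pipeline

extractor = pipeline('feature-extraction', model='distilbert-base-uncased')
features = extractor('This is a test', return_tensors=False)
print(len(features[0]))  # number of tokens; each entry is a hidden-state vector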
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Any = ["pixel_values"] def __init__( self : str , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : bool = True , **_A : str , ) -> None: """simple docstring""" super().__init__(**_A ) snake_case_ : Any = size if size is not None else {'height': 384, 'width': 384} snake_case_ : Tuple = get_size_dict(_A , default_to_square=_A ) snake_case_ : Union[str, Any] = do_resize snake_case_ : List[Any] = size snake_case_ : List[Any] = resample snake_case_ : Any = do_rescale snake_case_ : List[Any] = rescale_factor snake_case_ : Optional[int] = do_normalize snake_case_ : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN snake_case_ : Dict = image_std if image_std is not None else OPENAI_CLIP_STD snake_case_ : Any = do_convert_rgb def UpperCAmelCase_ ( self : List[str] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ) -> np.ndarray: """simple docstring""" snake_case_ : List[Any] = get_size_dict(_A , default_to_square=_A ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}""" ) snake_case_ : List[Any] = (size['height'], size['width']) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : List[Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Optional[int]: """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ) -> np.ndarray: """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : bool = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Optional[int] , ) -> PIL.Image.Image: """simple docstring""" snake_case_ : str = do_resize if do_resize is not None else self.do_resize snake_case_ : Any = resample if resample is not None else self.resample snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : int = image_mean if image_mean is not None else self.image_mean snake_case_ : Any = image_std if image_std is not None else self.image_std snake_case_ : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ : Tuple = size if size is not None else self.size snake_case_ : Optional[Any] = get_size_dict(_A , default_to_square=_A ) snake_case_ : Any = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ : Optional[Any] = [convert_to_rgb(_A ) for image in images] # All transformations expect numpy arrays. snake_case_ : Any = [to_numpy_array(_A ) for image in images] if do_resize: snake_case_ : str = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_rescale: snake_case_ : str = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: snake_case_ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] snake_case_ : Optional[Any] = [to_channel_dimension_format(_A , _A ) for image in images] snake_case_ : Tuple = BatchFeature(data={'pixel_values': images} , tensor_type=_A ) return encoded_outputs
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f'{solution() = }')
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: List[str] = "blip_2_vision_model" def __init__( self : Any , _A : Tuple=1408 , _A : Any=6144 , _A : str=39 , _A : int=16 , _A : Tuple=224 , _A : int=14 , _A : Optional[int]="gelu" , _A : List[str]=0.0_0_0_0_1 , _A : List[str]=0.0 , _A : int=1E-10 , _A : Tuple=True , **_A : Optional[Any] , ) -> Optional[int]: """simple docstring""" super().__init__(**_A ) snake_case_ : Optional[int] = hidden_size snake_case_ : Optional[int] = intermediate_size snake_case_ : Tuple = num_hidden_layers snake_case_ : Optional[Any] = num_attention_heads snake_case_ : Dict = patch_size snake_case_ : int = image_size snake_case_ : Union[str, Any] = initializer_range snake_case_ : List[Any] = attention_dropout snake_case_ : Tuple = layer_norm_eps snake_case_ : Tuple = hidden_act snake_case_ : List[str] = qkv_bias @classmethod def UpperCAmelCase_ ( cls : Union[str, Any] , _A : Union[str, os.PathLike] , **_A : Dict ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(_A ) snake_case_ ,snake_case_ : Union[str, Any] = cls.get_config_dict(_A , **_A ) # get the vision config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": snake_case_ : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_A , **_A ) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Any = "blip_2_qformer" def __init__( self : Optional[int] , _A : Any=30522 , _A : List[Any]=768 , _A : List[str]=12 , _A : Union[str, Any]=12 , _A : List[Any]=3072 , _A : str="gelu" , _A : Any=0.1 , _A : int=0.1 , _A : Tuple=512 , _A : Dict=0.0_2 , _A : Dict=1E-12 , _A : Optional[Any]=0 , _A : List[Any]="absolute" , _A : Dict=2 , _A : str=1408 , **_A : Optional[int] , ) -> Any: """simple docstring""" super().__init__(pad_token_id=_A , **_A ) snake_case_ : Any = vocab_size snake_case_ : Optional[int] = hidden_size snake_case_ : str = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : str = hidden_act snake_case_ : str = intermediate_size snake_case_ : int = hidden_dropout_prob snake_case_ : Optional[Any] = attention_probs_dropout_prob snake_case_ : Optional[int] = max_position_embeddings snake_case_ : Tuple = initializer_range snake_case_ : List[str] = layer_norm_eps snake_case_ : Dict = position_embedding_type snake_case_ : Optional[int] = cross_attention_frequency snake_case_ : Optional[int] = encoder_hidden_size @classmethod def UpperCAmelCase_ ( cls : Optional[Any] , _A : Union[str, os.PathLike] , **_A : Tuple ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(_A ) snake_case_ ,snake_case_ : List[Any] = cls.get_config_dict(_A , **_A ) # get the qformer config dict if we are loading from Blip2Config if config_dict.get('model_type' ) == "blip-2": snake_case_ : int = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_A , **_A ) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: List[Any] = "blip-2" __magic_name__: Optional[int] = True def __init__( self : str , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Any=None , _A : Optional[Any]=32 , **_A : int ) -> Union[str, Any]: """simple docstring""" super().__init__(**_A ) if vision_config is None: snake_case_ : Any = {} logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' ) if qformer_config is None: snake_case_ : List[str] = {} logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' ) if text_config is None: snake_case_ : List[str] = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) snake_case_ : Union[str, Any] = BlipaVisionConfig(**_A ) snake_case_ : Tuple = BlipaQFormerConfig(**_A ) snake_case_ : Any = text_config['model_type'] if 'model_type' in text_config else 'opt' snake_case_ : Union[str, Any] = CONFIG_MAPPING[text_model_type](**_A ) snake_case_ : Any = self.text_config.tie_word_embeddings snake_case_ : List[Any] = self.text_config.is_encoder_decoder snake_case_ : Optional[Any] = num_query_tokens snake_case_ : List[str] = self.vision_config.hidden_size snake_case_ : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES snake_case_ : Optional[Any] = 1.0 snake_case_ : Optional[Any] = 0.0_2 @classmethod def UpperCAmelCase_ ( cls : Tuple , _A : BlipaVisionConfig , _A : BlipaQFormerConfig , _A : PretrainedConfig , **_A : List[Any] , ) -> Optional[int]: """simple docstring""" return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_A , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" snake_case_ : Optional[int] = copy.deepcopy(self.__dict__ ) snake_case_ : Dict = self.vision_config.to_dict() snake_case_ : Union[str, Any] = self.qformer_config.to_dict() snake_case_ : Dict = self.text_config.to_dict() snake_case_ : Optional[int] = self.__class__.model_type return output
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple('result', 'name value')
    if (voltage, current, power).count(0) != 1:
        raise ValueError('Only one argument must be 0')
    elif power < 0:
        raise ValueError(
            'Power cannot be negative in any electrical/electronics system'
        )
    elif voltage == 0:
        return result('voltage', power / current)
    elif current == 0:
        return result('current', power / voltage)
    elif power == 0:
        return result('power', float(round(abs(voltage * current), 2)))
    else:
        raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
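# Worked example (illustrative, not part of the original file): the function
# solves P = V * I for whichever quantity is passed as 0.
print(electric_power(voltage=0, current=5, power=50))   # result(name='voltage', value=10.0)
print(electric_power(voltage=10, current=5, power=0))   # result(name='power', value=50.0)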
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
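# Quick sanity check (illustrative; assumes the maths.* helpers above are on the
# import path): mobius(4) = 0 since 4 = 2*2 is not square-free, mobius(6) = 1 for
# an even number of distinct prime factors, mobius(7) = -1 for an odd number.
for n in (4, 6, 7):
    print(n, mobius(n))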
import re import string import numpy as np import datasets _SCREAMING_SNAKE_CASE = """ Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. """ _SCREAMING_SNAKE_CASE = """ Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 25.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 50.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 75.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results[\"exact_match\"], 1)) 100.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"] >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 33.3 """ _SCREAMING_SNAKE_CASE = """ """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE_ ( datasets.Metric ): def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , 
id='sequence' ), } ) , reference_urls=[] , ) def UpperCAmelCase_ ( self : int , _A : Tuple , _A : Tuple , _A : str=None , _A : Dict=False , _A : Tuple=False , _A : str=False , ) -> Tuple: """simple docstring""" if regexes_to_ignore is not None: for s in regexes_to_ignore: snake_case_ : List[Any] = np.array([re.sub(_A , '' , _A ) for x in predictions] ) snake_case_ : Optional[Any] = np.array([re.sub(_A , '' , _A ) for x in references] ) else: snake_case_ : Dict = np.asarray(_A ) snake_case_ : Tuple = np.asarray(_A ) if ignore_case: snake_case_ : List[str] = np.char.lower(_A ) snake_case_ : Any = np.char.lower(_A ) if ignore_punctuation: snake_case_ : int = string.punctuation.maketrans('' , '' , string.punctuation ) snake_case_ : Tuple = np.char.translate(_A , table=_A ) snake_case_ : str = np.char.translate(_A , table=_A ) if ignore_numbers: snake_case_ : Optional[int] = string.digits.maketrans('' , '' , string.digits ) snake_case_ : str = np.char.translate(_A , table=_A ) snake_case_ : Union[str, Any] = np.char.translate(_A , table=_A ) snake_case_ : int = predictions == references return {"exact_match": np.mean(_A ) * 100}
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( __a ): if isinstance(__a , np.ndarray ): return list(tensor.shape ) snake_case_ : List[str] = tf.shape(__a ) if tensor.shape == tf.TensorShape(__a ): return dynamic snake_case_ : List[Any] = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(__a )] def SCREAMING_SNAKE_CASE__ ( __a , __a = None , __a = None ): return tf.nn.softmax(logits=logits + 1E-9 , axis=__a , name=__a ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a=1E-5 , __a=-1 ): # This is a very simplified functional layernorm, designed to duplicate # the functionality of PyTorch nn.functional.layer_norm when this is needed to port # models in Transformers. if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__a , __a ): raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' ) # Get mean and variance on the axis to be normalized snake_case_ ,snake_case_ : List[Any] = tf.nn.moments(__a , axes=[axis] , keepdims=__a ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis snake_case_ : Dict = [1] * inputs.shape.rank snake_case_ : List[str] = shape_list(__a )[axis] snake_case_ : Optional[int] = tf.reshape(__a , __a ) snake_case_ : Union[str, Any] = tf.reshape(__a , __a ) # Compute layer normalization using the batch_normalization # function. snake_case_ : Tuple = tf.nn.batch_normalization( __a , __a , __a , offset=__a , scale=__a , variance_epsilon=__a , ) return outputs def SCREAMING_SNAKE_CASE__ ( __a , __a=0 , __a=-1 ): # Replicates the behavior of torch.flatten in TF # If end_dim or start_dim is negative, count them from the end if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input snake_case_ : List[Any] = tf.shape(__a ) snake_case_ : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) snake_case_ : str = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(__a , __a ) def SCREAMING_SNAKE_CASE__ ( __a ): if not isinstance(__a , tf.Tensor ): snake_case_ : Dict = tf.convert_to_tensor(__a ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: snake_case_ : Dict = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: snake_case_ : Dict = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) snake_case_ : Union[str, Any] = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def SCREAMING_SNAKE_CASE__ ( __a , __a , __a = "input_ids" ): tf.debugging.assert_less( __a , tf.cast(__a , dtype=tensor.dtype ) , message=( f"""The maximum value of {tensor_name} ({tf.math.reduce_max(__a )}) must be smaller than the embedding """ f"""layer's input dimension ({embed_dim}). 
The likely cause is some problem at tokenization time.""" ) , ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): snake_case_ : Optional[Any] = 6_45_12 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. snake_case_ : Dict = [x for x in data if len(__a ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( 'The following attributes cannot be saved to HDF5 file because ' f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ f"""bytes: {bad_attributes}""" ) snake_case_ : Optional[Any] = np.asarray(__a ) snake_case_ : List[str] = 1 snake_case_ : Any = np.array_split(__a , __a ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 snake_case_ : int = np.array_split(__a , __a ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(__a ): snake_case_ : List[Any] = chunk_data else: snake_case_ : List[str] = data def SCREAMING_SNAKE_CASE__ ( __a , __a ): if name in group.attrs: snake_case_ : Dict = [n.decode('utf8' ) if hasattr(__a , 'decode' ) else n for n in group.attrs[name]] else: snake_case_ : Optional[Any] = [] snake_case_ : Optional[int] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('utf8' ) if hasattr(__a , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] ) chunk_id += 1 return data def SCREAMING_SNAKE_CASE__ ( __a ): def _expand_single_ad_tensor(__a ): if isinstance(__a , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(__a , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , __a )
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , _A : Optional[Any] , _A : Dict=13 , _A : Union[str, Any]=30 , _A : Tuple=2 , _A : Union[str, Any]=3 , _A : Optional[int]=True , _A : Optional[Any]=True , _A : str=32 , _A : int=2 , _A : List[str]=4 , _A : List[str]=37 , _A : Tuple="gelu" , _A : Dict=0.1 , _A : Optional[Any]=0.1 , _A : Optional[int]=10 , _A : Optional[int]=0.0_2 , _A : Optional[Any]=3 , _A : str=0.6 , _A : Union[str, Any]=None , ) -> Any: """simple docstring""" snake_case_ : Optional[int] = parent snake_case_ : Tuple = batch_size snake_case_ : List[Any] = image_size snake_case_ : List[str] = patch_size snake_case_ : List[str] = num_channels snake_case_ : Optional[Any] = is_training snake_case_ : Any = use_labels snake_case_ : Tuple = hidden_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : Optional[Any] = intermediate_size snake_case_ : List[Any] = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : Tuple = type_sequence_label_size snake_case_ : List[str] = initializer_range snake_case_ : Optional[Any] = mask_ratio snake_case_ : Any = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) snake_case_ : Optional[int] = (image_size // patch_size) ** 2 snake_case_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]: """simple docstring""" snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Union[str, Any] = None if self.use_labels: snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self : int ) -> Optional[Any]: """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def UpperCAmelCase_ ( self : List[Any] , _A : int , _A : Dict , _A : 
str ) -> Dict: """simple docstring""" snake_case_ : Union[str, Any] = TFViTMAEModel(config=_A ) snake_case_ : str = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : Dict , _A : Dict , _A : Any , _A : List[Any] ) -> int: """simple docstring""" snake_case_ : Any = TFViTMAEForPreTraining(_A ) snake_case_ : Optional[Any] = model(_A , training=_A ) # expected sequence length = num_patches snake_case_ : List[str] = (self.image_size // self.patch_size) ** 2 snake_case_ : Optional[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images snake_case_ : str = 1 snake_case_ : Dict = TFViTMAEForPreTraining(_A ) snake_case_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : List[str] = model(_A , training=_A ) snake_case_ : Optional[Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" snake_case_ : List[Any] = self.prepare_config_and_inputs() ((snake_case_) ,(snake_case_) ,(snake_case_)) : Any = config_and_inputs snake_case_ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ): __magic_name__: List[str] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __magic_name__: str = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {} __magic_name__: Dict = False __magic_name__: Dict = False __magic_name__: List[Any] = False __magic_name__: Dict = False def UpperCAmelCase_ ( self : Any ) -> List[Any]: """simple docstring""" snake_case_ : List[Any] = TFViTMAEModelTester(self ) snake_case_ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" snake_case_ ,snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[Any] = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) ) def UpperCAmelCase_ ( self : List[str] ) -> Dict: """simple docstring""" snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[str] = model_class(_A ) snake_case_ : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Dict = [*signature.parameters.keys()] snake_case_ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: 
"""simple docstring""" snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_A ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : Optional[Any] = model_class(_A ) snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A ) snake_case_ : List[str] = model(_A , noise=_A ) snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) ) snake_case_ : str = model(**_A , noise=_A ) snake_case_ : Union[str, Any] = outputs_dict[0].numpy() snake_case_ : Optional[Any] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_A : int ): snake_case_ : Any = {} for k, v in inputs_dict.items(): if tf.is_tensor(_A ): snake_case_ : str = v.numpy() else: snake_case_ : Optional[Any] = np.array(_A ) return inputs_np_dict for model_class in self.all_model_classes: snake_case_ : int = model_class(_A ) snake_case_ : List[Any] = self._prepare_for_class(_A , _A ) snake_case_ : Any = prepare_numpy_arrays(_A ) snake_case_ : List[Any] = model(_A , noise=_A ) snake_case_ : List[Any] = model(**_A , noise=_A ) self.assert_outputs_same(_A , _A ) def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]: """simple docstring""" np.random.seed(2 ) snake_case_ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ : Optional[int] = tf.constant(_A ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument snake_case_ : Optional[Any] = tf_noise super().check_pt_tf_models(_A , _A , _A ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_A ) if module_member_name.endswith('MainLayer' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )] for module_member in (getattr(_A , _A ),) if isinstance(_A , _A ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_A , '_keras_serializable' , _A ) } snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ : Optional[int] = tf.convert_to_tensor(_A ) inputs_dict.update({'noise': noise} ) for main_layer_class in tf_main_layer_classes: snake_case_ : Optional[Any] = main_layer_class(_A ) snake_case_ : List[str] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } snake_case_ : Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) ) snake_case_ : int = model(_A ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = os.path.join(_A , 'keras_model.h5' ) model.save(_A ) snake_case_ : str = tf.keras.models.load_model( _A , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_A , tf.keras.Model ) snake_case_ : List[str] = model(_A ) self.assert_outputs_same(_A , _A ) @slow def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : Optional[Any] = model_class(_A ) snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A ) snake_case_ : int = model(_A , noise=_A ) if model_class.__name__ == "TFViTMAEModel": snake_case_ : Any = outputs.last_hidden_state.numpy() snake_case_ : Optional[int] = 0 else: snake_case_ : str = outputs.logits.numpy() snake_case_ : Optional[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) snake_case_ : Any = model_class.from_pretrained(_A ) snake_case_ : Any = model(_A , noise=_A ) if model_class.__name__ == "TFViTMAEModel": snake_case_ : Dict = after_outputs['last_hidden_state'].numpy() snake_case_ : Dict = 0 else: snake_case_ : Any = after_outputs['logits'].numpy() snake_case_ : Optional[Any] = 0 snake_case_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) def UpperCAmelCase_ ( self : Any ) -> str: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : str = model_class(_A ) snake_case_ : int = self._prepare_for_class(_A , _A ) snake_case_ : str = model(_A , noise=_A ) snake_case_ : Dict = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_A ) snake_case_ : Any = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config snake_case_ : str = model_class.from_config(model.config ) snake_case_ : Union[str, Any] = new_model(_A ) # Build model new_model.set_weights(model.get_weights() ) snake_case_ : List[str] = new_model(_A , noise=_A ) self.assert_outputs_same(_A , _A ) @unittest.skip( 
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' ) def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple: """simple docstring""" pass @slow def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(_A ) def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ : List[str] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ) snake_case_ : List[Any] = self.default_image_processor snake_case_ : Dict = prepare_img() snake_case_ : Optional[Any] = image_processor(images=_A , return_tensors='tf' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) snake_case_ : int = ViTMAEConfig() snake_case_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) snake_case_ : List[Any] = np.random.uniform(size=(1, num_patches) ) # forward pass snake_case_ : Optional[Any] = model(**_A , noise=_A ) # verify the logits snake_case_ : Optional[int] = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , _A ) snake_case_ : Any = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , _A , atol=1E-4 )
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # Once the index reaches a power of two, every code gets one bit longer,
        # so rebuild the lexicon with a leading "0" on every existing key.
        if math.log2(index).is_integer():
            new_lexicon = {}
            for curr_key in list(lexicon):
                new_lexicon["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lexicon

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string (only '0's and '1's) as bytes to the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            # Write every byte, including the final padded one.
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Remove the size prefix that a compressed file carries."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def decompress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    decompress(sys.argv[1], sys.argv[2])
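# A minimal usage sketch for the decompressor above. The paths are hypothetical
# and assume "archive.lzw" was produced by the matching LZW compressor,
# including its leading size prefix:
#
#     decompress("archive.lzw", "archive.txt")  # writes the recovered bytes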
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """
    Explore the state space tree depth-first and backtrack as soon as a branch
    can no longer reach max_sum: either the current path already exceeds it, or
    the remaining numbers cannot make up the difference.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
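# Worked check (not from the source): exactly two subsets of the sample input
# sum to 9, found in input order, so the print above emits "[3, 4, 2] [4, 5]".
assert generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]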
from __future__ import annotations


def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Compute the rank of a matrix via Gaussian elimination (mutates matrix)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    row = 0
    while row < rank:
        # Check if the diagonal element is non-zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below to swap rows with
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                # No pivot in this column: shrink the rank and pull in the
                # column that previously sat at position `rank`
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row so the (possibly new) pivot is re-checked


    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
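# Two quick sanity checks, worked by hand rather than taken from the source.
# Note that rank_of_matrix mutates its argument in place.
assert rank_of_matrix([[1.0, 2.0], [2.0, 4.0]]) == 1  # second row is a multiple of the first
assert rank_of_matrix([[1.0, 0.0], [0.0, 1.0]]) == 2  # identity matrix keeps full rank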
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Newton-Laplace formula: the speed of sound in a fluid is the square root
    of its bulk modulus divided by its density.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
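# A worked example under assumed textbook values for water at about 20 C
# (density ~998 kg/m^3, bulk modulus ~2.15e9 Pa); the result is roughly
# 1467.7 m/s, i.e. about 1.47 km/s.
print(speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9))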
import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def SCREAMING_SNAKE_CASE__ ( __a , __a ): snake_case_ : Dict = f"""{sampling_rate}""" snake_case_ : Any = '1' snake_case_ : Tuple = 'f32le' snake_case_ : Optional[Any] = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(__a , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: snake_case_ : str = ffmpeg_process.communicate(__a ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error snake_case_ : Any = output_stream[0] snake_case_ : Optional[Any] = np.frombuffer(__a , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def SCREAMING_SNAKE_CASE__ ( __a , __a , __a = "f32le" , ): snake_case_ : List[Any] = f"""{sampling_rate}""" snake_case_ : Tuple = '1' if format_for_conversion == "s16le": snake_case_ : str = 2 elif format_for_conversion == "f32le": snake_case_ : List[Any] = 4 else: raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" ) snake_case_ : Optional[int] = platform.system() if system == "Linux": snake_case_ : Tuple = 'alsa' snake_case_ : List[str] = 'default' elif system == "Darwin": snake_case_ : Optional[Any] = 'avfoundation' snake_case_ : Any = ':0' elif system == "Windows": snake_case_ : List[Any] = 'dshow' snake_case_ : List[str] = 'default' snake_case_ : int = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] snake_case_ : str = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample snake_case_ : Any = _ffmpeg_stream(__a , __a ) for item in iterator: yield item def SCREAMING_SNAKE_CASE__ ( __a , __a , __a = None , __a = None , __a = "f32le" , ): if stream_chunk_s is not None: snake_case_ : Tuple = stream_chunk_s else: snake_case_ : Dict = chunk_length_s snake_case_ : Optional[int] = ffmpeg_microphone(__a , __a , format_for_conversion=__a ) if format_for_conversion == "s16le": snake_case_ : Dict = np.intaa snake_case_ : Optional[Any] = 2 elif format_for_conversion == "f32le": snake_case_ : int = np.floataa snake_case_ : int = 4 else: raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" ) if stride_length_s is None: snake_case_ : Optional[Any] = chunk_length_s / 6 snake_case_ : Dict = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(__a , (int, float) ): snake_case_ : Tuple = [stride_length_s, stride_length_s] snake_case_ : List[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample snake_case_ : Tuple = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample snake_case_ : Union[str, Any] = datetime.datetime.now() snake_case_ : Union[str, Any] = datetime.timedelta(seconds=__a ) for item in chunk_bytes_iter(__a , __a , stride=(stride_left, stride_right) , stream=__a ): # Put everything back in numpy scale snake_case_ : str = np.frombuffer(item['raw'] , dtype=__a ) snake_case_ : Union[str, Any] = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) snake_case_ : List[str] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! 
SKIP continue yield item def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a = False ): snake_case_ : Optional[int] = b'' snake_case_ ,snake_case_ : Optional[int] = stride if stride_left + stride_right >= chunk_len: raise ValueError( f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" ) snake_case_ : List[Any] = 0 for raw in iterator: acc += raw if stream and len(__a ) < chunk_len: snake_case_ : Optional[int] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(__a ) >= chunk_len: # We are flushing the accumulator snake_case_ : Any = (_stride_left, stride_right) snake_case_ : List[Any] = {'raw': acc[:chunk_len], 'stride': stride} if stream: snake_case_ : List[Any] = False yield item snake_case_ : Dict = stride_left snake_case_ : List[str] = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(__a ) > stride_left: snake_case_ : str = {'raw': acc, 'stride': (_stride_left, 0)} if stream: snake_case_ : Optional[int] = False yield item def SCREAMING_SNAKE_CASE__ ( __a , __a ): snake_case_ : List[Any] = 2**24 # 16Mo try: with subprocess.Popen(__a , stdout=subprocess.PIPE , bufsize=__a ) as ffmpeg_process: while True: snake_case_ : Tuple = ffmpeg_process.stdout.read(__a ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
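# A small self-contained sketch of how chunk_bytes_iter above behaves; the
# byte payload is made up for illustration. With a chunk length of 6 and a
# stride of 2 bytes on each side, consecutive chunks overlap so that a
# downstream consumer can discard the strided edges.
raw_chunks = iter([b"abcdef", b"ghijkl"])
for item in chunk_bytes_iter(raw_chunks, 6, stride=(2, 2)):
    print(item["raw"], item["stride"])
# b'abcdef' (0, 2)
# b'cdefgh' (2, 2)
# b'efghij' (2, 2)
# b'ghijkl' (2, 2)
# b'ijkl'   (2, 0)   <- final flush keeps only a left stride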
from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Length of the circular arc subtending `angle` degrees at the given radius."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
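# Quick check: a 90-degree arc is a quarter of the circumference, so
# arc_length(90, 10) should equal 2 * pi * 10 / 4 = 5 * pi (about 15.708).
from math import isclose

assert isclose(arc_length(90, 10), 5 * pi)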
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) _SCREAMING_SNAKE_CASE = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["""BeitFeatureExtractor"""] _SCREAMING_SNAKE_CASE = ["""BeitImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BeitForImageClassification""", """BeitForMaskedImageModeling""", """BeitForSemanticSegmentation""", """BeitModel""", """BeitPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """FlaxBeitForImageClassification""", """FlaxBeitForMaskedImageModeling""", """FlaxBeitModel""", """FlaxBeitPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Optional[Any] = ["pixel_values"] def __init__( self : str , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PIL.Image.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : str , ) -> None: """simple docstring""" super().__init__(**_A ) snake_case_ : Dict = size if size is not None else {'height': 256, 'width': 256} snake_case_ : Tuple = get_size_dict(_A ) snake_case_ : str = crop_size if crop_size is not None else {'height': 224, 'width': 224} snake_case_ : int = get_size_dict(_A , param_name='crop_size' ) snake_case_ : Union[str, Any] = do_resize snake_case_ : str = size snake_case_ : List[str] = resample snake_case_ : List[Any] = do_center_crop snake_case_ : Dict = crop_size snake_case_ : Tuple = do_rescale snake_case_ : Optional[Any] = rescale_factor snake_case_ : Any = do_normalize snake_case_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PIL.Image.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray: """simple docstring""" snake_case_ : Tuple = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" ) return resize( _A , size=(size['height'], size['width']) , resample=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray: """simple docstring""" snake_case_ : Optional[int] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A ) def UpperCAmelCase_ ( self : Dict , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ) -> str: """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : List[str] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : Union[str, Any]=None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : int , ) -> PIL.Image.Image: """simple docstring""" snake_case_ : int = do_resize if do_resize is not None else self.do_resize snake_case_ : str = resample if resample is not None else self.resample snake_case_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : Any = image_mean if image_mean is not None else self.image_mean snake_case_ : Dict = image_std if image_std is not None else self.image_std snake_case_ : int = size if size is not None else self.size snake_case_ : Optional[int] = get_size_dict(_A ) snake_case_ : int = crop_size if crop_size is not None else self.crop_size snake_case_ : Any = get_size_dict(_A , param_name='crop_size' ) snake_case_ : Optional[Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case_ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: snake_case_ : Dict = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_center_crop: snake_case_ : Optional[Any] = [self.center_crop(image=_A , size=_A ) for image in images] if do_rescale: snake_case_ : Optional[int] = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: snake_case_ : str = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] snake_case_ : Dict = [to_channel_dimension_format(_A , _A ) for image in images] snake_case_ : Tuple = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Dict = ["pixel_values"] def __init__( self : str , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : int = 8 , **_A : List[str] , ) -> None: """simple docstring""" super().__init__(**_A ) snake_case_ : Optional[Any] = do_rescale snake_case_ : Tuple = rescale_factor snake_case_ : str = do_pad snake_case_ : List[str] = pad_size def UpperCAmelCase_ ( self : Optional[Any] , _A : np.ndarray , _A : float , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[Any] ) -> np.ndarray: """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : List[Any] , _A : np.ndarray , _A : int , _A : Optional[Union[str, ChannelDimension]] = None ) -> int: """simple docstring""" snake_case_ ,snake_case_ : List[Any] = get_image_size(_A ) snake_case_ : Union[str, Any] = (old_height // size + 1) * size - old_height snake_case_ : Union[str, Any] = (old_width // size + 1) * size - old_width return pad(_A , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=_A ) def UpperCAmelCase_ ( self : Optional[int] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> Any: """simple docstring""" snake_case_ : Any = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : List[str] = do_pad if do_pad is not None else self.do_pad snake_case_ : Any = pad_size if pad_size is not None else self.pad_size snake_case_ : Tuple = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. snake_case_ : Optional[int] = [to_numpy_array(_A ) for image in images] if do_rescale: snake_case_ : Dict = [self.rescale(image=_A , scale=_A ) for image in images] if do_pad: snake_case_ : Optional[int] = [self.pad(_A , size=_A ) for image in images] snake_case_ : List[str] = [to_channel_dimension_format(_A , _A ) for image in images] snake_case_ : Dict = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the largest product of thirteen adjacent digits in the series."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
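# A one-line cross-check of the sliding-window loop above using math.prod over
# every 13-digit window (a verification sketch, not part of the original solution).
from math import prod

assert solution() == max(prod(int(d) for d in N[i : i + 13]) for i in range(len(N) - 12))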
import base64


def base85_encode(string: str) -> bytes:
    """Ascii85-encode the UTF-8 bytes of the given string."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
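# Round-trip sanity check; the sample string is arbitrary.
assert base85_decode(base85_encode("some text")) == "some text"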
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether tp freeze the encoder."} ) __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) __magic_name__: Optional[str] = field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) __magic_name__: Optional[int] = field( default=1024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field( default=128 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) __magic_name__: Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."} ) __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Source language id for translation."} ) __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Target language id for translation."} ) __magic_name__: Optional[int] = field(default=snake_case_ , metadata={"help": "# num_beams to use for evaluation."} ) __magic_name__: bool = field( default=snake_case_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__a , os.path.join(__a , f"""{split}_results.json""" ) ) def SCREAMING_SNAKE_CASE__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case_ ,snake_case_ ,snake_case_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case_ ,snake_case_ ,snake_case_ : List[str] = parser.parse_args_into_dataclasses() check_output_dir(__a ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s' , __a ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
snake_case_ : Tuple = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(__a , __a , __a ): assert hasattr(__a , __a ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__a , __a , getattr(__a , __a ) ) snake_case_ : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ : Any = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=__a , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__a , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: snake_case_ : Any = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__a , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__a , __a ): snake_case_ : int = tokenizer.lang_code_to_id[data_args.tgt_lang] else: snake_case_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__a ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) snake_case_ : List[Any] = SeqaSeqDataset # Get datasets snake_case_ : List[Any] = ( dataset_class( __a , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_train else None ) snake_case_ : List[str] = ( dataset_class( __a , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) snake_case_ : List[Any] = ( dataset_class( __a , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_predict else None ) # Initialize our Trainer snake_case_ : Any = ( build_compute_metrics_fn(data_args.task , __a ) if training_args.predict_with_generate else None ) snake_case_ : List[str] = SeqaSeqTrainer( model=__a , args=__a , data_args=__a , train_dataset=__a , eval_dataset=__a , data_collator=SeqaSeqDataCollator( __a , __a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__a , tokenizer=__a , ) snake_case_ : Optional[int] = {} # Training if training_args.do_train: logger.info('*** Train ***' ) snake_case_ : Any = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) snake_case_ : Tuple = train_result.metrics snake_case_ : List[str] = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('train' , __a , training_args.output_dir ) all_metrics.update(__a ) # Need to save 
the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ : List[Any] = trainer.evaluate(metric_key_prefix='val' ) snake_case_ : str = data_args.n_val snake_case_ : Union[str, Any] = round(metrics['val_loss'] , 4 ) if trainer.is_world_process_zero(): handle_metrics('val' , __a , training_args.output_dir ) all_metrics.update(__a ) if training_args.do_predict: logger.info('*** Predict ***' ) snake_case_ : Dict = trainer.predict(test_dataset=__a , metric_key_prefix='test' ) snake_case_ : Union[str, Any] = test_output.metrics snake_case_ : int = data_args.n_test if trainer.is_world_process_zero(): snake_case_ : List[str] = round(metrics['test_loss'] , 4 ) handle_metrics('test' , __a , training_args.output_dir ) all_metrics.update(__a ) if training_args.predict_with_generate: snake_case_ : Any = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) snake_case_ : Any = lmap(str.strip , __a ) write_txt_file(__a , os.path.join(training_args.output_dir , 'test_generations.txt' ) ) if trainer.is_world_process_zero(): save_json(__a , os.path.join(training_args.output_dir , 'all_results.json' ) ) return all_metrics def SCREAMING_SNAKE_CASE__ ( __a ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint _SCREAMING_SNAKE_CASE = { """169M""": 12, """430M""": 24, """1B5""": 24, """3B""": 32, """7B""": 32, """14B""": 40, } _SCREAMING_SNAKE_CASE = { """169M""": 7_68, """430M""": 10_24, """1B5""": 20_48, """3B""": 25_60, """7B""": 40_96, """14B""": 51_20, } def SCREAMING_SNAKE_CASE__ ( __a ): snake_case_ : Union[str, Any] = list(state_dict.keys() ) for name in state_dict_keys: snake_case_ : Any = state_dict.pop(__a ) # emb -> embedding if name.startswith('emb.' ): snake_case_ : Any = name.replace('emb.' , 'embeddings.' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('blocks.0.ln0' ): snake_case_ : List[str] = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' ) # att -> attention snake_case_ : Any = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , __a ) # ffn -> feed_forward snake_case_ : Optional[Any] = re.sub(r'blocks\.(\d+)\.ffn' , r'blocks.\1.feed_forward' , __a ) # time_mix_k -> time_mix_key and reshape if name.endswith('.time_mix_k' ): snake_case_ : Dict = name.replace('.time_mix_k' , '.time_mix_key' ) # time_mix_v -> time_mix_value and reshape if name.endswith('.time_mix_v' ): snake_case_ : str = name.replace('.time_mix_v' , '.time_mix_value' ) # time_mix_r -> time_mix_key and reshape if name.endswith('.time_mix_r' ): snake_case_ : List[str] = name.replace('.time_mix_r' , '.time_mix_receptance' ) if name != "head.weight": snake_case_ : List[str] = 'rwkv.' + name snake_case_ : Tuple = weight return state_dict def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a=None , __a=None , __a=False , __a=None ): # 1. If possible, build the tokenizer. if tokenizer_file is None: print('No `--tokenizer_file` provided, we will use the default tokenizer.' ) snake_case_ : int = 5_02_77 snake_case_ : str = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' ) else: snake_case_ : List[str] = PreTrainedTokenizerFast(tokenizer_file=__a ) snake_case_ : Any = len(__a ) tokenizer.save_pretrained(__a ) # 2. Build the config snake_case_ : Tuple = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: snake_case_ : Any = candidate break if size is None: raise ValueError('Could not infer the size, please provide it with the `--size` argument.' ) if size not in possible_sizes: raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" ) snake_case_ : Union[str, Any] = RwkvConfig( vocab_size=__a , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(__a ) # 3. Download model file then convert state_dict snake_case_ : str = hf_hub_download(__a , __a ) snake_case_ : Union[str, Any] = torch.load(__a , map_location='cpu' ) snake_case_ : Any = convert_state_dict(__a ) # 4. Split in shards and save snake_case_ ,snake_case_ : Union[str, Any] = shard_checkpoint(__a ) for shard_file, shard in shards.items(): torch.save(__a , os.path.join(__a , __a ) ) if index is not None: snake_case_ : Optional[int] = os.path.join(__a , __a ) # Save the index as well with open(__a , 'w' , encoding='utf-8' ) as f: snake_case_ : List[Any] = json.dumps(__a , indent=2 , sort_keys=__a ) + '\n' f.write(__a ) # 5. 
Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( 'Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.' ) snake_case_ : str = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: snake_case_ : Any = torch.load(os.path.join(__a , __a ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__a , __a ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('Please provide a `model_name` to push the model to the Hub.' ) snake_case_ : int = AutoModelForCausalLM.from_pretrained(__a ) model.push_to_hub(__a , max_shard_size='2GB' ) tokenizer.push_to_hub(__a ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( """--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint.""" ) parser.add_argument( """--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo.""" ) parser.add_argument( """--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model.""" ) parser.add_argument( """--tokenizer_file""", default=None, type=str, help="""Path to the tokenizer file to use (if not provided, only the model is converted).""", ) parser.add_argument( """--size""", default=None, type=str, help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Push to the Hub the converted model.""", ) parser.add_argument( """--model_name""", default=None, type=str, help="""Name of the pushed model on the Hub, including the username / organization.""", ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build the BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse the BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
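# Property check against the built-in sort. Because insert() absorbs an equal
# value into the existing node, this holds only for inputs with distinct values.
data = [10, 1, 3, 2, 9, 14, 13]  # distinct values only
assert tree_sort(data) == sorted(data)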
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : Dict ) -> List[Any]: """simple docstring""" snake_case_ : Any = 'hf-internal-testing/tiny-random-t5' snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(_A ) snake_case_ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_A ) snake_case_ : List[Any] = tokenizer('This is me' , return_tensors='pt' ) snake_case_ : Any = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) snake_case_ : Optional[Any] = model.generate(**_A ) snake_case_ : int = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) snake_case_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_A ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) snake_case_ : Optional[Any] = model_reloaded.generate(**_A ) self.assertTrue(torch.allclose(_A , _A ) ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" snake_case_ : Any = 'hf-internal-testing/tiny-random-t5' snake_case_ : int = AutoModelForSeqaSeqLM.from_pretrained(_A ) snake_case_ : Dict = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(_A ): model.save_pretrained(_A ) snake_case_ : Union[str, Any] = model.reverse_bettertransformer() model.save_pretrained(_A )
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) _SCREAMING_SNAKE_CASE = [ """cross_validation.py""", """gradient_accumulation.py""", """local_sgd.py""", """multi_process_metrics.py""", """memory.py""", """automatic_gradient_accumulation.py""", """fsdp_with_peak_mem_tracking.py""", """deepspeed_with_config_support.py""", """megatron_lm_gpt_pretraining.py""", ] class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : str , _A : str , _A : bool , _A : str = None , _A : list = None ) -> Tuple: """simple docstring""" snake_case_ : int = None snake_case_ : Union[str, Any] = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) snake_case_ : Optional[int] = os.path.abspath('examples' ) for item in os.listdir(_A ): if item not in EXCLUDE_EXAMPLES: snake_case_ : Optional[Any] = os.path.join(_A , _A ) if os.path.isfile(_A ) and ".py" in item_path: with self.subTest( tested_script=_A , feature_script=_A , tested_section='main()' if parser_only else 'training_function()' , ): snake_case_ : int = compare_against_test( os.path.join(_A , _A ) , _A , _A , _A ) snake_case_ : Any = '\n'.join(_A ) if special_strings is not None: for string in special_strings: snake_case_ : Dict = diff.replace(_A , '' ) self.assertEqual(_A , '' ) def UpperCAmelCase_ ( self : Tuple ) -> Any: """simple docstring""" self.one_complete_example('complete_nlp_example.py' , _A ) self.one_complete_example('complete_nlp_example.py' , _A ) def UpperCAmelCase_ ( self : str ) -> List[Any]: """simple docstring""" snake_case_ : Optional[int] = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) snake_case_ : Any = [ ' ' * 16 + '{\n\n', ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 20 + '"f1": eval_metric["f1"],\n\n', ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 20 + '"epoch": epoch,\n\n', ' ' * 16 + '},\n\n', ' ' * 16 + 'step=epoch,\n', ' ' * 12, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , _A , _A , _A ) self.one_complete_example('complete_cv_example.py' , _A , _A , _A ) @mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} ) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Dict = False @classmethod def UpperCAmelCase_ ( cls : Any ) -> Optional[Any]: """simple docstring""" super().setUpClass() snake_case_ : List[Any] = tempfile.mkdtemp() snake_case_ : Tuple = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) snake_case_ : Union[str, Any] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def UpperCAmelCase_ ( cls : int ) -> List[str]: """simple docstring""" super().tearDownClass() shutil.rmtree(cls._tmpdir ) def UpperCAmelCase_ ( self : List[str] ) -> str: """simple docstring""" snake_case_ : List[str] = F""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def 
UpperCAmelCase_ ( self : int ) -> Optional[int]: """simple docstring""" snake_case_ : List[str] = F""" examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} """.split() snake_case_ : Tuple = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" snake_case_ : str = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} """.split() snake_case_ : int = run_command(self._launch_args + testargs , return_stdout=_A ) self.assertNotIn('epoch 0:' , _A ) self.assertIn('epoch 1:' , _A ) def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" snake_case_ : List[Any] = F""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} """.split() snake_case_ : Optional[Any] = run_command(self._launch_args + testargs , return_stdout=_A ) if torch.cuda.is_available(): snake_case_ : int = torch.cuda.device_count() else: snake_case_ : Optional[Any] = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , _A ) self.assertIn('epoch 1:' , _A ) else: self.assertIn('epoch 0:' , _A ) self.assertIn('epoch 1:' , _A ) @slow def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" snake_case_ : Dict = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): snake_case_ : str = run_command(self._launch_args + testargs , return_stdout=_A ) snake_case_ : Any = re.findall('({.+})' , _A ) snake_case_ : int = [r for r in results if 'accuracy' in r][-1] snake_case_ : int = ast.literal_eval(_A ) self.assertGreaterEqual(results['accuracy'] , 0.7_5 ) def UpperCAmelCase_ ( self : Any ) -> str: """simple docstring""" snake_case_ : str = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: snake_case_ : int = F""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(_A , 'tracking' ) ) ) def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" snake_case_ : Dict = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]: """simple docstring""" snake_case_ : List[Any] = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def __init__( self : Union[str, Any] , _A : Any , _A : Dict ) -> Union[str, Any]: """simple docstring""" snake_case_ : str = params snake_case_ : int = np.array(_A ) snake_case_ : Optional[int] = np.array([len(_A ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Tuple , _A : Optional[int] ) -> str: """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self : List[str] ) -> str: """simple docstring""" return len(self.lengths ) def UpperCAmelCase_ ( self : Dict ) -> str: """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: """simple docstring""" snake_case_ : Dict = self.params.max_model_input_size snake_case_ : Tuple = self.lengths > max_len logger.info(F"""Splitting {sum(_A )} too long sequences.""" ) def divide_chunks(_A : Union[str, Any] , _A : Dict ): return [l[i : i + n] for i in range(0 , len(_A ) , _A )] snake_case_ : Dict = [] snake_case_ : Union[str, Any] = [] if self.params.mlm: snake_case_ ,snake_case_ : Optional[int] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: snake_case_ ,snake_case_ : Any = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: snake_case_ : List[Any] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: snake_case_ : Optional[int] = np.insert(_A , 0 , _A ) if sub_s[-1] != sep_id: snake_case_ : Optional[Any] = np.insert(_A , len(_A ) , _A ) assert len(_A ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(_A ) new_tok_ids.extend(_A ) new_lengths.extend([len(_A ) for l in sub_seqs] ) snake_case_ : Tuple = np.array(_A ) snake_case_ : int = np.array(_A ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: """simple docstring""" snake_case_ : Tuple = len(self ) snake_case_ : int = self.lengths > 11 snake_case_ : Dict = self.token_ids[indices] snake_case_ : int = self.lengths[indices] snake_case_ : List[Any] = len(self ) logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: snake_case_ : Optional[Any] = self.params.special_tok_ids['unk_token'] snake_case_ : Dict = len(self ) snake_case_ : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) snake_case_ : Any = (unk_occs / self.lengths) < 0.5 snake_case_ : List[Any] = self.token_ids[indices] snake_case_ : int = self.lengths[indices] snake_case_ : Tuple = len(self ) logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" if not self.params.is_master: return logger.info(F"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # 
logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCAmelCase_ ( self : Optional[int] , _A : Union[str, Any] ) -> List[Any]: """simple docstring""" snake_case_ : Any = [t[0] for t in batch] snake_case_ : int = [t[1] for t in batch] assert len(_A ) == len(_A ) # Max for paddings snake_case_ : str = max(_A ) # Pad token ids if self.params.mlm: snake_case_ : int = self.params.special_tok_ids['pad_token'] else: snake_case_ : Dict = self.params.special_tok_ids['unk_token'] snake_case_ : Dict = [list(t.astype(_A ) ) + [pad_idx] * (max_seq_len_ - len(_A )) for t in token_ids] assert len(tk_ ) == len(_A ) assert all(len(_A ) == max_seq_len_ for t in tk_ ) snake_case_ : Any = torch.tensor(tk_ ) # (bs, max_seq_len_) snake_case_ : Optional[Any] = torch.tensor(_A ) # (bs) return tk_t, lg_t
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
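# A minimal sketch of instantiating the config and reading the derived
# downsampling schedule; the printed values follow the defaults above.
config = LevitConfig()
print(config.hidden_sizes)  # [128, 256, 384]
print(config.down_ops[0])   # ['Subsample', 16, 8, 4, 2, 2]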
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
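# Both variants should agree with math.gcd for non-negative inputs; a quick
# equivalence sketch over a few sample pairs.
import math

for a, b in [(3, 5), (12, 18), (0, 7), (270, 192)]:
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)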
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ): __magic_name__: Tuple = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def UpperCAmelCase_ ( self : Any , _A : Union[str, Any]=0 ) -> List[str]: """simple docstring""" snake_case_ : Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(_A ) ) snake_case_ : Tuple = np.random.RandomState(_A ) snake_case_ : Any = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.7_5, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: """simple docstring""" snake_case_ : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=_A ) snake_case_ : str = self.get_dummy_inputs() snake_case_ : Dict = pipe(**_A ).images snake_case_ : Any = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) snake_case_ : Optional[int] = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]: """simple docstring""" snake_case_ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) snake_case_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_A ) pipe.set_progress_bar_config(disable=_A ) snake_case_ : int = self.get_dummy_inputs() snake_case_ : Optional[Any] = pipe(**_A ).images snake_case_ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ : List[str] = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def UpperCAmelCase_ ( self : List[str] ) -> List[str]: """simple docstring""" snake_case_ : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) snake_case_ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_A ) # warmup pass to apply optimizations snake_case_ : Any = pipe(**self.get_dummy_inputs() ) snake_case_ : Any = self.get_dummy_inputs() snake_case_ : Dict = pipe(**_A ).images snake_case_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ : str = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def UpperCAmelCase_ ( self : Optional[Any] ) -> int: """simple docstring""" snake_case_ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) snake_case_ : 
Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_A ) snake_case_ : Tuple = self.get_dummy_inputs() snake_case_ : Optional[Any] = pipe(**_A ).images snake_case_ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ : Any = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def UpperCAmelCase_ ( self : List[str] ) -> str: """simple docstring""" snake_case_ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) snake_case_ : Union[str, Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_A ) snake_case_ : Optional[int] = self.get_dummy_inputs() snake_case_ : str = pipe(**_A ).images snake_case_ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ : Union[str, Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def UpperCAmelCase_ ( self : Tuple ) -> List[Any]: """simple docstring""" snake_case_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' ) snake_case_ : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_A ) snake_case_ : Tuple = self.get_dummy_inputs() snake_case_ : Tuple = pipe(**_A ).images snake_case_ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ : int = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @property def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]: """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def UpperCAmelCase_ ( self : List[str] ) -> List[Any]: """simple docstring""" snake_case_ : List[str] = ort.SessionOptions() snake_case_ : int = False return options def UpperCAmelCase_ ( self : List[Any] ) -> int: """simple docstring""" snake_case_ : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) snake_case_ : Any = init_image.resize((768, 512) ) # using the PNDM scheduler by default snake_case_ : Any = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_A ) snake_case_ : Dict = 'A fantasy landscape, trending on artstation' snake_case_ : List[str] = np.random.RandomState(0 ) snake_case_ : List[Any] = pipe( prompt=_A , image=_A , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=_A , output_type='np' , ) snake_case_ : Dict = output.images snake_case_ : Dict = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ : List[Any] = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 
0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]: """simple docstring""" snake_case_ : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) snake_case_ : Optional[Any] = init_image.resize((768, 512) ) snake_case_ : Any = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' ) snake_case_ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=_A , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_A ) snake_case_ : Optional[Any] = 'A fantasy landscape, trending on artstation' snake_case_ : Dict = np.random.RandomState(0 ) snake_case_ : str = pipe( prompt=_A , image=_A , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=_A , output_type='np' , ) snake_case_ : int = output.images snake_case_ : int = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ : Any = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _SCREAMING_SNAKE_CASE = get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ): os.makedirs(__a , exist_ok=__a ) with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : Dict = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" snake_case_ : Dict = os.path.join(__a , __a ) if accelerator.process_index == 0: logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__a , __a ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Dict = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) snake_case_ : Dict = os.path.join(__a , __a ) logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__a , __a ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : Optional[int] = os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" ) os.makedirs(__a , exist_ok=__a ) logger.info(f"""Saving model to {ckpt_dir}""" ) snake_case_ : int = {'model': state_dict} dist_cp.save_state_dict( state_dict=__a , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , ) logger.info(f"""Model saved to {ckpt_dir}""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__a ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" snake_case_ : Optional[Any] = os.path.join(__a , __a ) logger.info(f"""Loading model from {input_model_file}""" ) snake_case_ : Optional[Any] = torch.load(__a ) logger.info(f"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Optional[Any] = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) snake_case_ : Tuple = os.path.join(__a , __a ) logger.info(f"""Loading model from {input_model_file}""" ) snake_case_ : Optional[int] = torch.load(__a ) logger.info(f"""Model loaded from {input_model_file}""" ) 
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : Tuple = ( os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" ) if f"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading model from {ckpt_dir}""" ) snake_case_ : List[Any] = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=__a , storage_reader=dist_cp.FileSystemReader(__a ) , planner=DefaultLoadPlanner() , ) snake_case_ : Any = state_dict['model'] logger.info(f"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(__a ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ): os.makedirs(__a , exist_ok=__a ) with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : List[str] = FSDP.optim_state_dict(__a , __a ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: snake_case_ : str = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) snake_case_ : Any = os.path.join(__a , __a ) logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(__a , __a ) logger.info(f"""Optimizer state saved in {output_optimizer_file}""" ) else: snake_case_ : Optional[int] = os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(__a , exist_ok=__a ) logger.info(f"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , ) logger.info(f"""Optimizer state saved in {ckpt_dir}""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : Optional[Any] = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: snake_case_ : Union[str, Any] = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) snake_case_ : List[Any] = os.path.join(__a , __a ) logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" ) snake_case_ : Optional[int] = torch.load(__a ) logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" ) else: snake_case_ : str = ( os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if f"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading Optimizer from {ckpt_dir}""" ) snake_case_ : Any = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__a ) , ) snake_case_ : Optional[int] = optim_state['optimizer'] logger.info(f"""Optimizer loaded from {ckpt_dir}""" ) snake_case_ : Optional[Any] = FSDP.optim_state_dict_to_load(__a , __a , __a ) optimizer.load_state_dict(__a )
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
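A short illustrative usage of `find_max` with hypothetical values, not part of the original file:

nums = [3, 1, 4, 1, 5, 9, 2, 6]
assert find_max(nums, 0, len(nums) - 1) == 9  # whole range
assert find_max(nums, 2, 5) == 9              # sub-range [2, 5]
assert find_max([7.5], 0, 0) == 7.5           # single element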
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : int = 32 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , _A : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , _A : bool = True , _A : Tuple=7 , _A : Tuple=30 , _A : int=400 , _A : Tuple=3 , ) -> Optional[int]: """simple docstring""" snake_case_ : str = parent snake_case_ : str = do_resize snake_case_ : str = size if size is not None else {'shortest_edge': 288} snake_case_ : Any = size_divisor snake_case_ : Any = do_rescale snake_case_ : Union[str, Any] = rescale_factor snake_case_ : str = do_normalize snake_case_ : int = do_center_crop snake_case_ : str = image_mean snake_case_ : int = image_std snake_case_ : Any = do_pad snake_case_ : Optional[int] = batch_size snake_case_ : List[str] = num_channels snake_case_ : Any = min_resolution snake_case_ : str = max_resolution def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def UpperCAmelCase_ ( self : Dict , _A : str , _A : Union[str, Any]=False ) -> int: """simple docstring""" if not batched: snake_case_ : Optional[int] = self.size['shortest_edge'] snake_case_ : List[Any] = image_inputs[0] if isinstance(_A , Image.Image ): snake_case_ ,snake_case_ : Optional[Any] = image.size else: snake_case_ ,snake_case_ : str = image.shape[1], image.shape[2] snake_case_ : Dict = size / min(_A , _A ) if h < w: snake_case_ ,snake_case_ : str = size, scale * w else: snake_case_ ,snake_case_ : Tuple = scale * h, size snake_case_ : Dict = int((1333 / 800) * size ) if max(_A , _A ) > max_size: snake_case_ : Union[str, Any] = max_size / max(_A , _A ) snake_case_ : Any = newh * scale snake_case_ : Union[str, Any] = neww * scale snake_case_ ,snake_case_ : Any = int(newh + 0.5 ), int(neww + 0.5 ) snake_case_ ,snake_case_ : int = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: snake_case_ : Optional[int] = [] for image in image_inputs: snake_case_ ,snake_case_ : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case_ : str = max(_A , key=lambda _A : item[0] )[0] snake_case_ : List[str] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ): __magic_name__: List[Any] = BridgeTowerImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: """simple docstring""" snake_case_ : int = BridgeTowerImageProcessingTester(self ) @property def UpperCAmelCase_ ( self : int ) -> Any: """simple 
docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , 'image_mean' ) ) self.assertTrue(hasattr(_A , 'image_std' ) ) self.assertTrue(hasattr(_A , 'do_normalize' ) ) self.assertTrue(hasattr(_A , 'do_resize' ) ) self.assertTrue(hasattr(_A , 'size' ) ) self.assertTrue(hasattr(_A , 'size_divisor' ) ) def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]: """simple docstring""" pass def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : List[str] = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Tuple = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : Any = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : str = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : 
Tuple = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""", """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""", """xlm-roberta-large-finetuned-conll02-dutch""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json""" ), """xlm-roberta-large-finetuned-conll02-spanish""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json""" ), """xlm-roberta-large-finetuned-conll03-english""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json""" ), """xlm-roberta-large-finetuned-conll03-german""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json""" ), } class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: int = "xlm-roberta" def __init__( self : List[Any] , _A : Optional[Any]=30522 , _A : Optional[Any]=768 , _A : Optional[Any]=12 , _A : Optional[Any]=12 , _A : Optional[int]=3072 , _A : int="gelu" , _A : Union[str, Any]=0.1 , _A : Union[str, Any]=0.1 , _A : Dict=512 , _A : List[Any]=2 , _A : Union[str, Any]=0.0_2 , _A : Union[str, Any]=1E-12 , _A : str=1 , _A : List[str]=0 , _A : int=2 , _A : Any="absolute" , _A : int=True , _A : List[Any]=None , **_A : Optional[int] , ) -> Any: """simple docstring""" super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A ) snake_case_ : Dict = vocab_size snake_case_ : str = hidden_size snake_case_ : List[Any] = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : List[str] = hidden_act snake_case_ : Union[str, Any] = intermediate_size snake_case_ : int = hidden_dropout_prob snake_case_ : int = attention_probs_dropout_prob snake_case_ : Optional[int] = max_position_embeddings snake_case_ : Tuple = type_vocab_size snake_case_ : List[str] = initializer_range snake_case_ : Optional[int] = layer_norm_eps snake_case_ : List[str] = position_embedding_type snake_case_ : Optional[Any] = use_cache snake_case_ : Optional[Any] = classifier_dropout class SCREAMING_SNAKE_CASE_ ( snake_case_ ): @property def UpperCAmelCase_ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": snake_case_ : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: snake_case_ : List[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin inside this benchmark module
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):  # intentionally shadows the builtin inside this benchmark module
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
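An equivalent modular-arithmetic formulation, sketched here as an illustrative check: keeping only the last ten digits during the summation gives the same answer, since truncation commutes with addition modulo 10**10.

MOD = 10**10
# Same last ten digits without ever materialising the full multi-thousand-digit sum
modular = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
assert str(modular).zfill(10) == solution()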
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _SCREAMING_SNAKE_CASE = 16 _SCREAMING_SNAKE_CASE = 32 def SCREAMING_SNAKE_CASE__ ( __a , __a = 16 , __a = "bert-base-cased" ): snake_case_ : Any = AutoTokenizer.from_pretrained(__a ) snake_case_ : Optional[int] = load_dataset('glue' , 'mrpc' ) def tokenize_function(__a ): # max_length=None => use the model max length (it's actually the default) snake_case_ : int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__a , max_length=__a ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset snake_case_ : Tuple = datasets.map( __a , batched=__a , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=__a ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library snake_case_ : str = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(__a ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__a , padding='max_length' , max_length=1_28 , return_tensors='pt' ) return tokenizer.pad(__a , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. snake_case_ : Tuple = DataLoader( tokenized_datasets['train'] , shuffle=__a , collate_fn=__a , batch_size=__a ) snake_case_ : Any = DataLoader( tokenized_datasets['validation'] , shuffle=__a , collate_fn=__a , batch_size=__a ) return train_dataloader, eval_dataloader def SCREAMING_SNAKE_CASE__ ( __a , __a ): # Initialize accelerator snake_case_ : Optional[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs snake_case_ : Optional[int] = config['lr'] snake_case_ : List[Any] = int(config['num_epochs'] ) snake_case_ : List[Any] = int(config['seed'] ) snake_case_ : List[Any] = int(config['batch_size'] ) snake_case_ : List[Any] = args.model_name_or_path set_seed(__a ) snake_case_ ,snake_case_ : List[Any] = get_dataloaders(__a , __a , __a ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) snake_case_ : Any = AutoModelForSequenceClassification.from_pretrained(__a , return_dict=__a ) # Instantiate optimizer snake_case_ : int = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) snake_case_ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=__a ) if accelerator.state.deepspeed_plugin is not None: snake_case_ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: snake_case_ : str = 1 snake_case_ : Optional[int] = (len(__a ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): snake_case_ : List[str] = get_linear_schedule_with_warmup( optimizer=__a , num_warmup_steps=0 , num_training_steps=__a , ) else: snake_case_ : Union[str, Any] = DummyScheduler(__a , 
total_num_steps=__a , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ : Any = accelerator.prepare( __a , __a , __a , __a , __a ) # We need to keep track of how many total steps we have iterated over snake_case_ : List[str] = 0 # We also need to keep track of the stating epoch so files are named properly snake_case_ : List[Any] = 0 # Now we train the model snake_case_ : List[Any] = evaluate.load('glue' , 'mrpc' ) snake_case_ : List[Any] = 0 snake_case_ : Optional[Any] = {} for epoch in range(__a , __a ): model.train() for step, batch in enumerate(__a ): snake_case_ : List[Any] = model(**__a ) snake_case_ : int = outputs.loss snake_case_ : Union[str, Any] = loss / gradient_accumulation_steps accelerator.backward(__a ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() snake_case_ : int = 0 for step, batch in enumerate(__a ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): snake_case_ : str = model(**__a ) snake_case_ : List[Any] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times snake_case_ ,snake_case_ : str = accelerator.gather( (predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__a ) - 1: snake_case_ : str = predictions[: len(eval_dataloader.dataset ) - samples_seen] snake_case_ : int = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__a , references=__a , ) snake_case_ : Optional[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , __a ) snake_case_ : str = eval_metric['accuracy'] if best_performance < eval_metric["accuracy"]: snake_case_ : List[str] = eval_metric['accuracy'] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f: json.dump(__a , __a ) def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=__a , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=__a , ) parser.add_argument( '--output_dir' , type=__a , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--performance_lower_bound' , type=__a , default=__a , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , ) parser.add_argument( '--num_epochs' , type=__a , default=3 , help='Number of train epochs.' 
, ) snake_case_ : Optional[Any] = parser.parse_args() snake_case_ : Union[str, Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(__a , __a ) if __name__ == "__main__": main()
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_lxmert import LxmertTokenizer _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _SCREAMING_SNAKE_CASE = { """vocab_file""": { """unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""", }, """tokenizer_file""": { """unc-nlp/lxmert-base-uncased""": ( """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json""" ), }, } _SCREAMING_SNAKE_CASE = { """unc-nlp/lxmert-base-uncased""": 5_12, } _SCREAMING_SNAKE_CASE = { """unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True}, } class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: List[Any] = VOCAB_FILES_NAMES __magic_name__: List[str] = PRETRAINED_VOCAB_FILES_MAP __magic_name__: List[str] = PRETRAINED_INIT_CONFIGURATION __magic_name__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__: Union[str, Any] = LxmertTokenizer def __init__( self : List[str] , _A : Union[str, Any]=None , _A : Optional[Any]=None , _A : Dict=True , _A : Dict="[UNK]" , _A : Optional[int]="[SEP]" , _A : Dict="[PAD]" , _A : Union[str, Any]="[CLS]" , _A : str="[MASK]" , _A : Tuple=True , _A : Dict=None , **_A : List[Any] , ) -> Optional[int]: """simple docstring""" super().__init__( _A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , ) snake_case_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _A ) != do_lower_case or normalizer_state.get('strip_accents' , _A ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _A ) != tokenize_chinese_chars ): snake_case_ : Tuple = getattr(_A , normalizer_state.pop('type' ) ) snake_case_ : Union[str, Any] = do_lower_case snake_case_ : int = strip_accents snake_case_ : Optional[Any] = tokenize_chinese_chars snake_case_ : List[Any] = normalizer_class(**_A ) snake_case_ : Tuple = do_lower_case def UpperCAmelCase_ ( self : Dict , _A : Any , _A : List[Any]=None ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase_ ( self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]: """simple docstring""" snake_case_ : str = [self.sep_token_id] snake_case_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" snake_case_ : Union[str, Any] = self._tokenizer.model.save(_A , name=_A ) return tuple(_A )
327
1
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    sum_of_num = 0
    for digit in str(num):
        sum_of_num += int(digit)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
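A compact equivalent, shown as an illustrative check: 2**15 = 32768, whose digits sum to 26.

assert solution(15) == sum(int(d) for d in str(2**15)) == 26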
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of ``number`` ends in ``number`` itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
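Illustrative checks of the predicate above (5² = 25, 76² = 5776, and 625² = 390625 all end in the original number; 7² = 49 does not):

assert is_automorphic_number(5)
assert is_automorphic_number(76)
assert is_automorphic_number(625)
assert not is_automorphic_number(7)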
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _SCREAMING_SNAKE_CASE = { """configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """GraphormerForGraphClassification""", """GraphormerModel""", """GraphormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _SCREAMING_SNAKE_CASE = { """configuration_autoformer""": [ """AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AutoformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """AutoformerForPrediction""", """AutoformerModel""", """AutoformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
_SCREAMING_SNAKE_CASE = """ # Transformers 설치 방법 ! pip install transformers datasets # 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요. # ! pip install git+https://github.com/huggingface/transformers.git """ _SCREAMING_SNAKE_CASE = [{"""type""": """code""", """content""": INSTALL_CONTENT}] _SCREAMING_SNAKE_CASE = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
from typing import Dict from .base import GenericTensor, Pipeline class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def UpperCAmelCase_ ( self : str , _A : Optional[Any]=None , _A : List[str]=None , _A : Optional[Any]=None , **_A : List[str] ) -> Any: """simple docstring""" if tokenize_kwargs is None: snake_case_ : Optional[Any] = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( 'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' ) snake_case_ : int = truncation snake_case_ : Optional[int] = tokenize_kwargs snake_case_ : Dict = {} if return_tensors is not None: snake_case_ : Union[str, Any] = return_tensors return preprocess_params, {}, postprocess_params def UpperCAmelCase_ ( self : Optional[int] , _A : int , **_A : Any ) -> Dict[str, GenericTensor]: """simple docstring""" snake_case_ : Dict = self.framework snake_case_ : Any = self.tokenizer(_A , return_tensors=_A , **_A ) return model_inputs def UpperCAmelCase_ ( self : Optional[Any] , _A : List[str] ) -> int: """simple docstring""" snake_case_ : Tuple = self.model(**_A ) return model_outputs def UpperCAmelCase_ ( self : Union[str, Any] , _A : str , _A : str=False ) -> Any: """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[str] , *_A : Union[str, Any] , **_A : Tuple ) -> List[str]: """simple docstring""" return super().__call__(*_A , **_A )
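For context, a minimal usage sketch of the feature-extraction pipeline the class above implements; the checkpoint name is illustrative and any small encoder model would do:

from transformers import pipeline

extractor = pipeline(task="feature-extraction", model="distilbert-base-cased")
# Nested lists of floats with shape (batch, tokens, hidden_size)
features = extractor("This is a simple test.")
print(len(features[0]), len(features[0][0]))  # number_of_tokens, 768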
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) require_version("""pytorch_lightning>=1.0.4""") _SCREAMING_SNAKE_CASE = { """base""": AutoModel, """sequence-classification""": AutoModelForSequenceClassification, """question-answering""": AutoModelForQuestionAnswering, """pretraining""": AutoModelForPreTraining, """token-classification""": AutoModelForTokenClassification, """language-modeling""": AutoModelWithLMHead, """summarization""": AutoModelForSeqaSeqLM, """translation""": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization _SCREAMING_SNAKE_CASE = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } _SCREAMING_SNAKE_CASE = sorted(arg_to_scheduler.keys()) _SCREAMING_SNAKE_CASE = """{""" + """, """.join(arg_to_scheduler_choices) + """}""" class SCREAMING_SNAKE_CASE_ ( pl.LightningModule ): def __init__( self : Any , _A : argparse.Namespace , _A : List[Any]=None , _A : Optional[int]="base" , _A : int=None , _A : Union[str, Any]=None , _A : Optional[int]=None , **_A : Any , ) -> int: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(_A ) snake_case_ : Optional[int] = 0 snake_case_ : Union[str, Any] = Path(self.hparams.output_dir ) snake_case_ : Dict = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: snake_case_ : List[str] = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , ) else: snake_case_ : PretrainedConfig = config snake_case_ : int = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(self.hparams , _A , _A ): assert hasattr(self.config , _A ), F"""model config doesn't have a `{p}` attribute""" setattr(self.config , _A , getattr(self.hparams , _A ) ) if tokenizer is None: snake_case_ : str = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , ) else: snake_case_ : PreTrainedTokenizer = tokenizer snake_case_ : List[Any] = MODEL_MODES[mode] if model is None: snake_case_ : int = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , 
config=self.config , cache_dir=_A , ) else: snake_case_ : Union[str, Any] = model def UpperCAmelCase_ ( self : List[Any] , *_A : List[Any] , **_A : List[Any] ) -> int: """simple docstring""" snake_case_ : Union[str, Any] = self.model_type.from_pretrained(*_A , **_A ) def UpperCAmelCase_ ( self : Tuple ) -> str: """simple docstring""" snake_case_ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler] snake_case_ : Tuple = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) snake_case_ : Optional[int] = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1} return scheduler def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]: """simple docstring""" snake_case_ : str = self.model snake_case_ : int = ['bias', 'LayerNorm.weight'] snake_case_ : List[str] = [ { 'params': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters 'weight_decay': self.hparams.weight_decay, }, { 'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] if self.hparams.adafactor: snake_case_ : Dict = Adafactor( _A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A ) else: snake_case_ : Union[str, Any] = AdamW( _A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) snake_case_ : Tuple = optimizer snake_case_ : Optional[int] = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase_ ( self : Optional[Any] , _A : int , _A : Dict ) -> str: """simple docstring""" return self.validation_step(_A , _A ) def UpperCAmelCase_ ( self : int , _A : Optional[int] ) -> Dict: """simple docstring""" return self.validation_end(_A ) def UpperCAmelCase_ ( self : Tuple ) -> int: """simple docstring""" snake_case_ : List[str] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores snake_case_ : Any = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase_ ( self : List[Any] , _A : Any ) -> Dict: """simple docstring""" if stage == "test": snake_case_ : str = len(self.test_dataloader().dataset ) else: snake_case_ : Optional[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A ) snake_case_ : Any = len(self.train_dataloader().dataset ) def UpperCAmelCase_ ( self : Dict , _A : str , _A : int , _A : bool = False ) -> int: """simple docstring""" raise NotImplementedError('You must implement this for your task' ) def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" return self.train_loader def UpperCAmelCase_ ( self : Any ) -> Dict: """simple docstring""" return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A ) def UpperCAmelCase_ ( self : Any ) -> Tuple: """simple docstring""" return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A ) def UpperCAmelCase_ ( self : Optional[Any] , _A : Optional[Any] ) -> str: """simple docstring""" return os.path.join( self.hparams.data_dir , 'cached_{}_{}_{}'.format( _A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase_ ( self : Dict , _A : Dict[str, Any] ) -> None: """simple docstring""" snake_case_ : Tuple = self.output_dir.joinpath('best_tfmr' ) snake_case_ : Any = self.step_count self.model.save_pretrained(_A ) self.tokenizer.save_pretrained(_A ) 
@staticmethod def UpperCAmelCase_ ( _A : List[str] , _A : Optional[Any] ) -> Optional[int]: """simple docstring""" parser.add_argument( '--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' ) parser.add_argument( '--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , ) parser.add_argument( '--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , ) parser.add_argument( '--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , ) parser.add_argument( '--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , ) parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' ) parser.add_argument( '--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , ) parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' ) parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' ) parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' ) parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A ) parser.add_argument('--train_batch_size' , default=32 , type=_A ) parser.add_argument('--eval_batch_size' , default=32 , type=_A ) parser.add_argument('--adafactor' , action='store_true' ) class SCREAMING_SNAKE_CASE_ ( pl.Callback ): def UpperCAmelCase_ ( self : Any , _A : List[str] , _A : List[Any] ) -> Optional[Any]: """simple docstring""" if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class SCREAMING_SNAKE_CASE_ ( pl.Callback ): def UpperCAmelCase_ ( self : Any , _A : Optional[int] , _A : Tuple ) -> str: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(_A ) class SCREAMING_SNAKE_CASE_ ( pl.Callback ): def UpperCAmelCase_ ( self : str , _A : Optional[int] , _A : Dict ) -> Any: """simple docstring""" snake_case_ : List[Any] = trainer.lr_schedulers[0]['scheduler'] snake_case_ : int = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(_A ) def UpperCAmelCase_ ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Any: """simple docstring""" rank_zero_info('***** Validation results *****' ) snake_case_ : Tuple = trainer.callback_metrics # Log results for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def UpperCAmelCase_ ( self : Tuple , _A : pl.Trainer , _A : pl.LightningModule ) -> Union[str, Any]: """simple docstring""" rank_zero_info('***** Test results *****' ) snake_case_ : str = trainer.callback_metrics # Log and save results to file snake_case_ : Union[str, Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' ) with open(_A , 'w' ) as writer: for key in sorted(_A ): if key not in ["log", "progress_bar"]: rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) ) writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) ) def SCREAMING_SNAKE_CASE__ ( __a , __a ): # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( '--output_dir' , default=str(Path(__a ).parent / 'test_run' / 'model_checkpoints' ) , type=__a , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=__a , default='O2' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=__a ) parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=__a , help='Max gradient norm' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' ) parser.add_argument( '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=__a , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--seed' , type=__a , default=42 , help='random seed for initialization' ) parser.add_argument( '--data_dir' , default=str(Path(__a ).parent / 'test_run' / 'dummy-train-data' ) , type=__a , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' 
, ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a=None , __a=True , __a=[] , __a=None , __a=None , **__a , ): pl.seed_everything(args.seed ) # init model snake_case_ : str = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=__a ) # add custom checkpoints if checkpoint_callback is None: snake_case_ : Optional[int] = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(__a ) if logging_callback is None: snake_case_ : Dict = LoggingCallback() snake_case_ : Union[str, Any] = {} if args.fp16: snake_case_ : Dict = 16 if args.gpus > 1: snake_case_ : int = 'auto' snake_case_ : List[str] = 'ddp' snake_case_ : str = args.accumulate_grad_batches snake_case_ : int = None snake_case_ : Tuple = 'auto' snake_case_ : List[Any] = pl.Trainer.from_argparse_args( __a , weights_summary=__a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=__a , val_check_interval=1 , num_sanity_val_steps=2 , **__a , ) if args.do_train: trainer.fit(__a ) else: print('RAG modeling tests with new set functions successfully executed!' ) return trainer
327
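# A hedged usage sketch, not part of the original sample: assuming the helpers
# above keep their upstream lightning_base names (`add_generic_args` for the
# module-level argument function, `add_model_specific_args` for the @staticmethod,
# and `generic_train` for the train driver), a driver script typically wires them
# together like this (`SomeRagModule` is a hypothetical lightning module class):
#
#   import argparse, os
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   parser = SomeRagModule.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   model = SomeRagModule(args)
#   trainer = generic_train(model, args)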
from itertools import permutations def is_substring_divisible( num ): if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False tests = [7, 11, 13, 17] for i, test in enumerate(tests ): if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def solution( __a = 10 ): return sum( int(''.join(map(str , num ) ) ) for num in permutations(range(__a ) ) if is_substring_divisible(num ) ) if __name__ == "__main__": print(F'''{solution() = }''')
327
1
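# A small sanity check, not part of the original sample: the pandigital number
# 1406357289 quoted in the Project Euler 43 statement passes every substring
# divisibility rule, so the predicate above should accept it:
#
#   digits = tuple(int(c) for c in "1406357289")
#   assert is_substring_divisible(digits)  # 406, 063, 635, 357, 572, 728, 289 are divisible by 2, 3, 5, 7, 11, 13, 17
#
# The widely published answer to the full problem (summing all qualifying 0-9 pandigitals) is 16695334890.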
import numpy as np from PIL import Image def maxpooling( arr , size , stride ): arr = np.array(arr ) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix' ) i = 0 j = 0 mat_i = 0 mat_j = 0 # compute the shape of the output matrix maxpool_shape = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape updated_arr = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 j = 0 mat_j = 0 return updated_arr def avgpooling( arr , size , stride ): arr = np.array(arr ) if arr.shape[0] != arr.shape[1]: raise ValueError('The input array is not a square matrix' ) i = 0 j = 0 mat_i = 0 mat_j = 0 # compute the shape of the output matrix avgpool_shape = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape updated_arr = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 j = 0 mat_j = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="""avgpooling""", verbose=True) # Loading the image image = Image.open("""path_to_image""") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
327
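# A worked example, not part of the original sample: pooling a 4x4 input with
# size=2 and stride=2 yields a 2x2 output.
#
#   arr = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
#   maxpooling(arr, size=2, stride=2)  # [[ 6.,  8.], [14., 16.]]
#   avgpooling(arr, size=2, stride=2)  # [[ 3.,  5.], [11., 13.]] -- window means 3.5/5.5/11.5/13.5 truncated by int()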
from __future__ import annotations from collections import namedtuple def electric_power( voltage , current , power ): result = namedtuple('result' , 'name value' ) if (voltage, current, power).count(0 ) != 1: raise ValueError('Only one argument must be 0' ) elif power < 0: raise ValueError( 'Power cannot be negative in any electrical/electronics system' ) elif voltage == 0: return result('voltage' , power / current ) elif current == 0: return result('current' , power / voltage ) elif power == 0: return result('power' , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
327
1
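# A usage sketch, not part of the original sample: exactly one of the three
# quantities must be zero; the missing one is then solved from P = V * I.
#
#   electric_power(voltage=0, current=2, power=5)  # result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=4, power=0)  # result(name='power', value=8.0)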
def speed_of_sound_in_a_fluid( density , bulk_modulus ): if density <= 0: raise ValueError('Impossible fluid density' ) if bulk_modulus <= 0: raise ValueError('Impossible bulk modulus' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
327
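# A worked number, not part of the original sample, using representative values
# for water (density ~998 kg/m^3, bulk modulus ~2.15e9 Pa) in the Newton-Laplace
# relation c = sqrt(K / rho) implemented above:
#
#   speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)  # ~1467.8 m/s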
import re import string import numpy as np import datasets _SCREAMING_SNAKE_CASE = """ Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. """ _SCREAMING_SNAKE_CASE = """ Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all numbers before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 25.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 50.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True) >>> print(round(results[\"exact_match\"], 1)) 75.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"] >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results[\"exact_match\"], 1)) 100.0 >>> exact_match = datasets.load_metric(\"exact_match\") >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"] >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results[\"exact_match\"], 1)) 33.3 """ _SCREAMING_SNAKE_CASE = """ """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE_ ( datasets.Metric ): def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , 
id='sequence' ), } ) , reference_urls=[] , ) def UpperCAmelCase_ ( self : int , _A : Tuple , _A : Tuple , _A : str=None , _A : Dict=False , _A : Tuple=False , _A : str=False , ) -> Tuple: """simple docstring""" if regexes_to_ignore is not None: for s in regexes_to_ignore: snake_case_ : List[Any] = np.array([re.sub(_A , '' , _A ) for x in predictions] ) snake_case_ : Optional[Any] = np.array([re.sub(_A , '' , _A ) for x in references] ) else: snake_case_ : Dict = np.asarray(_A ) snake_case_ : Tuple = np.asarray(_A ) if ignore_case: snake_case_ : List[str] = np.char.lower(_A ) snake_case_ : Any = np.char.lower(_A ) if ignore_punctuation: snake_case_ : int = string.punctuation.maketrans('' , '' , string.punctuation ) snake_case_ : Tuple = np.char.translate(_A , table=_A ) snake_case_ : str = np.char.translate(_A , table=_A ) if ignore_numbers: snake_case_ : Optional[int] = string.digits.maketrans('' , '' , string.digits ) snake_case_ : str = np.char.translate(_A , table=_A ) snake_case_ : Union[str, Any] = np.char.translate(_A , table=_A ) snake_case_ : int = predictions == references return {"exact_match": np.mean(_A ) * 100}
327
1
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _SCREAMING_SNAKE_CASE = { """configuration_efficientnet""": [ """EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EfficientNetConfig""", """EfficientNetOnnxConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["""EfficientNetImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """EfficientNetForImageClassification""", """EfficientNetModel""", """EfficientNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
327
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , _A : Optional[Any] , _A : Dict=13 , _A : Union[str, Any]=30 , _A : Tuple=2 , _A : Union[str, Any]=3 , _A : Optional[int]=True , _A : Optional[Any]=True , _A : str=32 , _A : int=2 , _A : List[str]=4 , _A : List[str]=37 , _A : Tuple="gelu" , _A : Dict=0.1 , _A : Optional[Any]=0.1 , _A : Optional[int]=10 , _A : Optional[int]=0.0_2 , _A : Optional[Any]=3 , _A : str=0.6 , _A : Union[str, Any]=None , ) -> Any: """simple docstring""" snake_case_ : Optional[int] = parent snake_case_ : Tuple = batch_size snake_case_ : List[Any] = image_size snake_case_ : List[str] = patch_size snake_case_ : List[str] = num_channels snake_case_ : Optional[Any] = is_training snake_case_ : Any = use_labels snake_case_ : Tuple = hidden_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : Optional[Any] = intermediate_size snake_case_ : List[Any] = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : Tuple = type_sequence_label_size snake_case_ : List[str] = initializer_range snake_case_ : Optional[Any] = mask_ratio snake_case_ : Any = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) snake_case_ : Optional[int] = (image_size // patch_size) ** 2 snake_case_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]: """simple docstring""" snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Union[str, Any] = None if self.use_labels: snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self : int ) -> Optional[Any]: """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def UpperCAmelCase_ ( self : List[Any] , _A : int , _A : Dict , _A : 
str ) -> Dict: """simple docstring""" snake_case_ : Union[str, Any] = TFViTMAEModel(config=_A ) snake_case_ : str = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : Dict , _A : Dict , _A : Any , _A : List[Any] ) -> int: """simple docstring""" snake_case_ : Any = TFViTMAEForPreTraining(_A ) snake_case_ : Optional[Any] = model(_A , training=_A ) # expected sequence length = num_patches snake_case_ : List[str] = (self.image_size // self.patch_size) ** 2 snake_case_ : Optional[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images snake_case_ : str = 1 snake_case_ : Dict = TFViTMAEForPreTraining(_A ) snake_case_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : List[str] = model(_A , training=_A ) snake_case_ : Optional[Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" snake_case_ : List[Any] = self.prepare_config_and_inputs() ((snake_case_) ,(snake_case_) ,(snake_case_)) : Any = config_and_inputs snake_case_ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ): __magic_name__: List[str] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __magic_name__: str = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {} __magic_name__: Dict = False __magic_name__: Dict = False __magic_name__: List[Any] = False __magic_name__: Dict = False def UpperCAmelCase_ ( self : Any ) -> List[Any]: """simple docstring""" snake_case_ : List[Any] = TFViTMAEModelTester(self ) snake_case_ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" snake_case_ ,snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[Any] = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) ) def UpperCAmelCase_ ( self : List[str] ) -> Dict: """simple docstring""" snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[str] = model_class(_A ) snake_case_ : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Dict = [*signature.parameters.keys()] snake_case_ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: 
"""simple docstring""" snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_A ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : Optional[Any] = model_class(_A ) snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A ) snake_case_ : List[str] = model(_A , noise=_A ) snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) ) snake_case_ : str = model(**_A , noise=_A ) snake_case_ : Union[str, Any] = outputs_dict[0].numpy() snake_case_ : Optional[Any] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_A : int ): snake_case_ : Any = {} for k, v in inputs_dict.items(): if tf.is_tensor(_A ): snake_case_ : str = v.numpy() else: snake_case_ : Optional[Any] = np.array(_A ) return inputs_np_dict for model_class in self.all_model_classes: snake_case_ : int = model_class(_A ) snake_case_ : List[Any] = self._prepare_for_class(_A , _A ) snake_case_ : Any = prepare_numpy_arrays(_A ) snake_case_ : List[Any] = model(_A , noise=_A ) snake_case_ : List[Any] = model(**_A , noise=_A ) self.assert_outputs_same(_A , _A ) def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]: """simple docstring""" np.random.seed(2 ) snake_case_ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ : Optional[int] = tf.constant(_A ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument snake_case_ : Optional[Any] = tf_noise super().check_pt_tf_models(_A , _A , _A ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_A ) if module_member_name.endswith('MainLayer' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )] for module_member in (getattr(_A , _A ),) if isinstance(_A , _A ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_A , '_keras_serializable' , _A ) } snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ : Optional[int] = tf.convert_to_tensor(_A ) inputs_dict.update({'noise': noise} ) for main_layer_class in tf_main_layer_classes: snake_case_ : Optional[Any] = main_layer_class(_A ) snake_case_ : List[str] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } snake_case_ : Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) ) snake_case_ : int = model(_A ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = os.path.join(_A , 'keras_model.h5' ) model.save(_A ) snake_case_ : str = tf.keras.models.load_model( _A , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_A , tf.keras.Model ) snake_case_ : List[str] = model(_A ) self.assert_outputs_same(_A , _A ) @slow def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : Optional[Any] = model_class(_A ) snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A ) snake_case_ : int = model(_A , noise=_A ) if model_class.__name__ == "TFViTMAEModel": snake_case_ : Any = outputs.last_hidden_state.numpy() snake_case_ : Optional[int] = 0 else: snake_case_ : str = outputs.logits.numpy() snake_case_ : Optional[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) snake_case_ : Any = model_class.from_pretrained(_A ) snake_case_ : Any = model(_A , noise=_A ) if model_class.__name__ == "TFViTMAEModel": snake_case_ : Dict = after_outputs['last_hidden_state'].numpy() snake_case_ : Dict = 0 else: snake_case_ : Any = after_outputs['logits'].numpy() snake_case_ : Optional[Any] = 0 snake_case_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) def UpperCAmelCase_ ( self : Any ) -> str: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : str = model_class(_A ) snake_case_ : int = self._prepare_for_class(_A , _A ) snake_case_ : str = model(_A , noise=_A ) snake_case_ : Dict = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_A ) snake_case_ : Any = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config snake_case_ : str = model_class.from_config(model.config ) snake_case_ : Union[str, Any] = new_model(_A ) # Build model new_model.set_weights(model.get_weights() ) snake_case_ : List[str] = new_model(_A , noise=_A ) self.assert_outputs_same(_A , _A ) @unittest.skip( 
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' ) def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple: """simple docstring""" pass @slow def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(_A ) def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ : List[str] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ) snake_case_ : List[Any] = self.default_image_processor snake_case_ : Dict = prepare_img() snake_case_ : Optional[Any] = image_processor(images=_A , return_tensors='tf' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) snake_case_ : int = ViTMAEConfig() snake_case_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) snake_case_ : List[Any] = np.random.uniform(size=(1, num_patches) ) # forward pass snake_case_ : Optional[Any] = model(**_A , noise=_A ) # verify the logits snake_case_ : Optional[int] = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , _A ) snake_case_ : Any = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , _A , atol=1E-4 )
327
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _SCREAMING_SNAKE_CASE = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""", """UniSpeechForCTC""", """UniSpeechForPreTraining""", """UniSpeechForSequenceClassification""", """UniSpeechModel""", """UniSpeechPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
327
from __future__ import annotations def generate_sum_of_subsets_soln( nums , max_sum ): result: list[list[int]] = [] path: list[int] = [] num_index = 0 remaining_nums_sum = sum(nums ) create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum ) return result def create_state_space_tree( nums , max_sum , num_index , path , result , remaining_nums_sum , ): if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum: return if sum(path ) == max_sum: result.append(path ) return for index in range(num_index , len(nums ) ): create_state_space_tree( nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , ) nums = [3, 34, 4, 12, 5, 2] max_sum = 9 result = generate_sum_of_subsets_soln(nums, max_sum) print(*result)
327
1
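# A note on expected output, not part of the original sample: for
# nums=[3, 34, 4, 12, 5, 2] and max_sum=9 the backtracking search finds exactly
# two subsets in DFS order, so `print(*result)` emits:
#
#   [3, 4, 2] [4, 5]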
from __future__ import annotations import numpy as np def relu( __a ): return np.maximum(0 , __a ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
327
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : int = 32 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , _A : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , _A : bool = True , _A : Tuple=7 , _A : Tuple=30 , _A : int=400 , _A : Tuple=3 , ) -> Optional[int]: """simple docstring""" snake_case_ : str = parent snake_case_ : str = do_resize snake_case_ : str = size if size is not None else {'shortest_edge': 288} snake_case_ : Any = size_divisor snake_case_ : Any = do_rescale snake_case_ : Union[str, Any] = rescale_factor snake_case_ : str = do_normalize snake_case_ : int = do_center_crop snake_case_ : str = image_mean snake_case_ : int = image_std snake_case_ : Any = do_pad snake_case_ : Optional[int] = batch_size snake_case_ : List[str] = num_channels snake_case_ : Any = min_resolution snake_case_ : str = max_resolution def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def UpperCAmelCase_ ( self : Dict , _A : str , _A : Union[str, Any]=False ) -> int: """simple docstring""" if not batched: snake_case_ : Optional[int] = self.size['shortest_edge'] snake_case_ : List[Any] = image_inputs[0] if isinstance(_A , Image.Image ): snake_case_ ,snake_case_ : Optional[Any] = image.size else: snake_case_ ,snake_case_ : str = image.shape[1], image.shape[2] snake_case_ : Dict = size / min(_A , _A ) if h < w: snake_case_ ,snake_case_ : str = size, scale * w else: snake_case_ ,snake_case_ : Tuple = scale * h, size snake_case_ : Dict = int((1333 / 800) * size ) if max(_A , _A ) > max_size: snake_case_ : Union[str, Any] = max_size / max(_A , _A ) snake_case_ : Any = newh * scale snake_case_ : Union[str, Any] = neww * scale snake_case_ ,snake_case_ : Any = int(newh + 0.5 ), int(neww + 0.5 ) snake_case_ ,snake_case_ : int = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: snake_case_ : Optional[int] = [] for image in image_inputs: snake_case_ ,snake_case_ : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case_ : str = max(_A , key=lambda _A : item[0] )[0] snake_case_ : List[str] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ): __magic_name__: List[Any] = BridgeTowerImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: """simple docstring""" snake_case_ : int = BridgeTowerImageProcessingTester(self ) @property def UpperCAmelCase_ ( self : int ) -> Any: """simple 
docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , 'image_mean' ) ) self.assertTrue(hasattr(_A , 'image_std' ) ) self.assertTrue(hasattr(_A , 'do_normalize' ) ) self.assertTrue(hasattr(_A , 'do_resize' ) ) self.assertTrue(hasattr(_A , 'size' ) ) self.assertTrue(hasattr(_A , 'size_divisor' ) ) def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]: """simple docstring""" pass def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : List[str] = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Tuple = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : Any = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : str = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : 
Tuple = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
327
from math import pi def arc_length( angle , radius ): return 2 * pi * radius * (angle / 3_60) if __name__ == "__main__": print(arc_length(90, 10))
327
1
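# A worked check, not part of the original sample: the function above computes
# 2*pi*r*(angle/360), so arc_length(90, 10) = 2*pi*10*(90/360) = 5*pi ~= 15.70796.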
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json"""} _SCREAMING_SNAKE_CASE = { """vocab_file""": { """mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""", } } _SCREAMING_SNAKE_CASE = {"""mgp-str""": 27} class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Optional[int] = VOCAB_FILES_NAMES __magic_name__: int = PRETRAINED_VOCAB_FILES_MAP __magic_name__: Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : str , _A : Any , _A : Union[str, Any]="[GO]" , _A : Union[str, Any]="[GO]" , _A : int="[s]" , _A : Union[str, Any]="[GO]" , **_A : List[str] ) -> Optional[int]: """simple docstring""" super().__init__( unk_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , **_A , ) with open(_A , encoding='utf-8' ) as vocab_handle: snake_case_ : int = json.load(_A ) snake_case_ : Tuple = {v: k for k, v in self.vocab.items()} @property def UpperCAmelCase_ ( self : Any ) -> Any: """simple docstring""" return len(self.vocab ) def UpperCAmelCase_ ( self : int ) -> int: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def UpperCAmelCase_ ( self : List[Any] , _A : Dict ) -> Union[str, Any]: """simple docstring""" snake_case_ : int = [] for s in text: char_tokens.extend(_A ) return char_tokens def UpperCAmelCase_ ( self : Any , _A : str ) -> Optional[int]: """simple docstring""" return self.vocab.get(_A , self.vocab.get(self.unk_token ) ) def UpperCAmelCase_ ( self : List[str] , _A : Any ) -> Optional[Any]: """simple docstring""" return self.decoder.get(_A ) def UpperCAmelCase_ ( self : Any , _A : str , _A : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_A ): logger.error('Vocabulary path ({}) should be a directory'.format(_A ) ) return snake_case_ : List[Any] = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) with open(_A , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=_A , ensure_ascii=_A ) + '\n' ) return (vocab_file,)
327
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Optional[Any] = ["pixel_values"] def __init__( self : str , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PIL.Image.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : str , ) -> None: """simple docstring""" super().__init__(**_A ) snake_case_ : Dict = size if size is not None else {'height': 256, 'width': 256} snake_case_ : Tuple = get_size_dict(_A ) snake_case_ : str = crop_size if crop_size is not None else {'height': 224, 'width': 224} snake_case_ : int = get_size_dict(_A , param_name='crop_size' ) snake_case_ : Union[str, Any] = do_resize snake_case_ : str = size snake_case_ : List[str] = resample snake_case_ : List[Any] = do_center_crop snake_case_ : Dict = crop_size snake_case_ : Tuple = do_rescale snake_case_ : Optional[Any] = rescale_factor snake_case_ : Any = do_normalize snake_case_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PIL.Image.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray: """simple docstring""" snake_case_ : Tuple = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" ) return resize( _A , size=(size['height'], size['width']) , resample=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray: """simple docstring""" snake_case_ : Optional[int] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A ) def UpperCAmelCase_ ( self : Dict , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ) -> str: """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : List[str] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : Union[str, Any]=None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : int , ) -> PIL.Image.Image: """simple docstring""" snake_case_ : int = do_resize if do_resize is not None else self.do_resize snake_case_ : str = resample if resample is not None else self.resample snake_case_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : Any = image_mean if image_mean is not None else self.image_mean snake_case_ : Dict = image_std if image_std is not None else self.image_std snake_case_ : int = size if size is not None else self.size snake_case_ : Optional[int] = get_size_dict(_A ) snake_case_ : int = crop_size if crop_size is not None else self.crop_size snake_case_ : Any = get_size_dict(_A , param_name='crop_size' ) snake_case_ : Optional[Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case_ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: snake_case_ : Dict = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_center_crop: snake_case_ : Optional[Any] = [self.center_crop(image=_A , size=_A ) for image in images] if do_rescale: snake_case_ : Optional[int] = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: snake_case_ : str = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] snake_case_ : Dict = [to_channel_dimension_format(_A , _A ) for image in images] snake_case_ : Tuple = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
327
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """post_extract_proj""": """feature_projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.upsample.0""": """encoder.upsample.projection""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """layer_norm""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", } def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a ): for attribute in key.split('.' ): snake_case_ : Tuple = getattr(__a , __a ) if weight_type is not None: snake_case_ : Optional[int] = getattr(__a , __a ).shape else: snake_case_ : Any = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case_ : int = value elif weight_type == "weight_g": snake_case_ : int = value elif weight_type == "weight_v": snake_case_ : Optional[Any] = value elif weight_type == "bias": snake_case_ : Any = value else: snake_case_ : Dict = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): snake_case_ : Any = [] snake_case_ : Union[str, Any] = fairseq_model.state_dict() snake_case_ : Dict = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): snake_case_ : Optional[Any] = False if "conv_layers" in name: load_conv_layer( __a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , ) snake_case_ : int = True else: for key, mapped_key in MAPPING.items(): snake_case_ : Union[str, Any] = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: snake_case_ : str = True if "*" in mapped_key: snake_case_ : Any = name.split(__a )[0].split('.' )[-2] snake_case_ : Dict = mapped_key.replace('*' , __a ) if "weight_g" in name: snake_case_ : Any = 'weight_g' elif "weight_v" in name: snake_case_ : Dict = 'weight_v' elif "weight" in name: snake_case_ : Tuple = 'weight' elif "bias" in name: snake_case_ : Tuple = 'bias' else: snake_case_ : Tuple = None set_recursively(__a , __a , __a , __a , __a ) continue if not is_used: unused_weights.append(__a ) logger.warning(f"""Unused weights: {unused_weights}""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a ): snake_case_ : Any = full_name.split('conv_layers.' )[-1] snake_case_ : Tuple = name.split('.' 
) snake_case_ : Optional[int] = int(items[0] ) snake_case_ : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case_ : Any = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case_ : Dict = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." ) snake_case_ : str = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case_ : Dict = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__a ) def SCREAMING_SNAKE_CASE__ ( __a , __a ): snake_case_ : Optional[int] = SEWConfig() if is_finetuned: snake_case_ : Tuple = model.wav_encoder.wav_model.cfg else: snake_case_ : List[str] = model.cfg snake_case_ : Dict = fs_config.conv_bias snake_case_ : int = eval(fs_config.conv_feature_layers ) snake_case_ : Dict = [x[0] for x in conv_layers] snake_case_ : Any = [x[1] for x in conv_layers] snake_case_ : Optional[int] = [x[2] for x in conv_layers] snake_case_ : Any = 'gelu' snake_case_ : Optional[Any] = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group' snake_case_ : Any = 0.0 snake_case_ : Any = fs_config.activation_fn.name snake_case_ : Optional[Any] = fs_config.encoder_embed_dim snake_case_ : List[Any] = 0.02 snake_case_ : Optional[Any] = fs_config.encoder_ffn_embed_dim snake_case_ : int = 1E-5 snake_case_ : Tuple = fs_config.encoder_layerdrop snake_case_ : Optional[int] = fs_config.encoder_attention_heads snake_case_ : Any = fs_config.conv_pos_groups snake_case_ : int = fs_config.conv_pos snake_case_ : Optional[Any] = len(__a ) snake_case_ : Optional[Any] = fs_config.encoder_layers snake_case_ : Any = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: snake_case_ : Union[str, Any] = model.cfg snake_case_ : List[str] = fs_config.final_dropout snake_case_ : Optional[int] = fs_config.layerdrop snake_case_ : Optional[int] = fs_config.activation_dropout snake_case_ : List[Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 snake_case_ : str = fs_config.attention_dropout snake_case_ : Tuple = fs_config.dropout_input snake_case_ : Optional[Any] = fs_config.dropout snake_case_ : Optional[Any] = fs_config.mask_channel_length snake_case_ : Dict = fs_config.mask_channel_prob snake_case_ : Dict = fs_config.mask_length snake_case_ : List[Any] = 
fs_config.mask_prob snake_case_ : int = 'Wav2Vec2FeatureExtractor' snake_case_ : List[str] = 'Wav2Vec2CTCTokenizer' return config @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( __a , __a , __a=None , __a=None , __a=True ): if is_finetuned: snake_case_ ,snake_case_ ,snake_case_ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: snake_case_ ,snake_case_ ,snake_case_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: snake_case_ : List[str] = SEWConfig.from_pretrained(__a ) else: snake_case_ : str = convert_config(model[0] , __a ) snake_case_ : Tuple = model[0].eval() snake_case_ : Optional[int] = True if config.feat_extract_norm == 'layer' else False snake_case_ : Any = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , ) if is_finetuned: if dict_path: snake_case_ : Tuple = Dictionary.load(__a ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ : Optional[int] = target_dict.pad_index snake_case_ : str = target_dict.bos_index snake_case_ : int = target_dict.pad_index snake_case_ : Any = target_dict.bos_index snake_case_ : Union[str, Any] = target_dict.eos_index snake_case_ : str = len(target_dict.symbols ) snake_case_ : Union[str, Any] = os.path.join(__a , 'vocab.json' ) if not os.path.isdir(__a ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__a ) ) return os.makedirs(__a , exist_ok=__a ) with open(__a , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , __a ) snake_case_ : Tuple = WavaVecaCTCTokenizer( __a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__a , ) snake_case_ : Optional[int] = WavaVecaProcessor(feature_extractor=__a , tokenizer=__a ) processor.save_pretrained(__a ) snake_case_ : Dict = SEWForCTC(__a ) else: snake_case_ : List[str] = SEWModel(__a ) feature_extractor.save_pretrained(__a ) recursively_load_weights(__a , __a , __a ) hf_model.save_pretrained(__a ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
327
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the
    1000-digit number ``n`` (Project Euler problem 8)."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
327
1
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # map internal backend names onto the pip package names
                    # used in the deps table
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
327
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether tp freeze the encoder."} ) __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) __magic_name__: Optional[str] = field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) __magic_name__: Optional[int] = field( default=1024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field( default=128 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) __magic_name__: Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."} ) __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Source language id for translation."} ) __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Target language id for translation."} ) __magic_name__: Optional[int] = field(default=snake_case_ , metadata={"help": "# num_beams to use for evaluation."} ) __magic_name__: bool = field( default=snake_case_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__a , os.path.join(__a , f"""{split}_results.json""" ) ) def SCREAMING_SNAKE_CASE__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case_ ,snake_case_ ,snake_case_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case_ ,snake_case_ ,snake_case_ : List[str] = parser.parse_args_into_dataclasses() check_output_dir(__a ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s' , __a ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
snake_case_ : Tuple = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(__a , __a , __a ): assert hasattr(__a , __a ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__a , __a , getattr(__a , __a ) ) snake_case_ : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ : Any = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=__a , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__a , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: snake_case_ : Any = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__a , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__a , __a ): snake_case_ : int = tokenizer.lang_code_to_id[data_args.tgt_lang] else: snake_case_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__a ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) snake_case_ : List[Any] = SeqaSeqDataset # Get datasets snake_case_ : List[Any] = ( dataset_class( __a , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_train else None ) snake_case_ : List[str] = ( dataset_class( __a , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) snake_case_ : List[Any] = ( dataset_class( __a , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_predict else None ) # Initialize our Trainer snake_case_ : Any = ( build_compute_metrics_fn(data_args.task , __a ) if training_args.predict_with_generate else None ) snake_case_ : List[str] = SeqaSeqTrainer( model=__a , args=__a , data_args=__a , train_dataset=__a , eval_dataset=__a , data_collator=SeqaSeqDataCollator( __a , __a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__a , tokenizer=__a , ) snake_case_ : Optional[int] = {} # Training if training_args.do_train: logger.info('*** Train ***' ) snake_case_ : Any = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) snake_case_ : Tuple = train_result.metrics snake_case_ : List[str] = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('train' , __a , training_args.output_dir ) all_metrics.update(__a ) # Need to save 
the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ : List[Any] = trainer.evaluate(metric_key_prefix='val' ) snake_case_ : str = data_args.n_val snake_case_ : Union[str, Any] = round(metrics['val_loss'] , 4 ) if trainer.is_world_process_zero(): handle_metrics('val' , __a , training_args.output_dir ) all_metrics.update(__a ) if training_args.do_predict: logger.info('*** Predict ***' ) snake_case_ : Dict = trainer.predict(test_dataset=__a , metric_key_prefix='test' ) snake_case_ : Union[str, Any] = test_output.metrics snake_case_ : int = data_args.n_test if trainer.is_world_process_zero(): snake_case_ : List[str] = round(metrics['test_loss'] , 4 ) handle_metrics('test' , __a , training_args.output_dir ) all_metrics.update(__a ) if training_args.predict_with_generate: snake_case_ : Any = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) snake_case_ : Any = lmap(str.strip , __a ) write_txt_file(__a , os.path.join(training_args.output_dir , 'test_generations.txt' ) ) if trainer.is_world_process_zero(): save_json(__a , os.path.join(training_args.output_dir , 'all_results.json' ) ) return all_metrics def SCREAMING_SNAKE_CASE__ ( __a ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
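# A hypothetical invocation of this training script (flag names follow the
# dataclass fields and the Seq2Seq training arguments above; the model name
# and paths are placeholders):
#
#   python finetune_trainer.py \
#       --model_name_or_path t5-small \
#       --data_dir /path/to/data \
#       --task translation \
#       --output_dir ./outputs \
#       --do_train --do_eval \
#       --predict_with_generate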
327
1
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
327
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _SCREAMING_SNAKE_CASE = { """configuration_poolformer""": [ """POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PoolFormerConfig""", """PoolFormerOnnxConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["""PoolFormerFeatureExtractor"""] _SCREAMING_SNAKE_CASE = ["""PoolFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PoolFormerForImageClassification""", """PoolFormerModel""", """PoolFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
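# Thanks to the _LazyModule above, the heavy torch/vision submodules are only
# imported when one of these names is first accessed. A minimal usage sketch
# (assumes torch is installed):
#
#   from transformers import PoolFormerConfig, PoolFormerForImageClassification
#
#   model = PoolFormerForImageClassification(PoolFormerConfig())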
327
1
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """Explore the state-space tree depth-first, pruning branches that have
    already exceeded max_sum or can no longer reach it."""
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
327
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : Dict ) -> List[Any]: """simple docstring""" snake_case_ : Any = 'hf-internal-testing/tiny-random-t5' snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(_A ) snake_case_ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_A ) snake_case_ : List[Any] = tokenizer('This is me' , return_tensors='pt' ) snake_case_ : Any = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) snake_case_ : Optional[Any] = model.generate(**_A ) snake_case_ : int = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) snake_case_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_A ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) snake_case_ : Optional[Any] = model_reloaded.generate(**_A ) self.assertTrue(torch.allclose(_A , _A ) ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" snake_case_ : Any = 'hf-internal-testing/tiny-random-t5' snake_case_ : int = AutoModelForSeqaSeqLM.from_pretrained(_A ) snake_case_ : Dict = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(_A ): model.save_pretrained(_A ) snake_case_ : Union[str, Any] = model.reverse_bettertransformer() model.save_pretrained(_A )
327
1
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : Dict ) -> List[Any]: """simple docstring""" snake_case_ : Any = 'hf-internal-testing/tiny-random-t5' snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(_A ) snake_case_ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_A ) snake_case_ : List[Any] = tokenizer('This is me' , return_tensors='pt' ) snake_case_ : Any = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) snake_case_ : Optional[Any] = model.generate(**_A ) snake_case_ : int = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) snake_case_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_A ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) snake_case_ : Optional[Any] = model_reloaded.generate(**_A ) self.assertTrue(torch.allclose(_A , _A ) ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" snake_case_ : Any = 'hf-internal-testing/tiny-random-t5' snake_case_ : int = AutoModelForSeqaSeqLM.from_pretrained(_A ) snake_case_ : Dict = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(_A ): model.save_pretrained(_A ) snake_case_ : Union[str, Any] = model.reverse_bettertransformer() model.save_pretrained(_A )
327
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def __init__( self : Union[str, Any] , _A : Any , _A : Dict ) -> Union[str, Any]: """simple docstring""" snake_case_ : str = params snake_case_ : int = np.array(_A ) snake_case_ : Optional[int] = np.array([len(_A ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Tuple , _A : Optional[int] ) -> str: """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self : List[str] ) -> str: """simple docstring""" return len(self.lengths ) def UpperCAmelCase_ ( self : Dict ) -> str: """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: """simple docstring""" snake_case_ : Dict = self.params.max_model_input_size snake_case_ : Tuple = self.lengths > max_len logger.info(F"""Splitting {sum(_A )} too long sequences.""" ) def divide_chunks(_A : Union[str, Any] , _A : Dict ): return [l[i : i + n] for i in range(0 , len(_A ) , _A )] snake_case_ : Dict = [] snake_case_ : Union[str, Any] = [] if self.params.mlm: snake_case_ ,snake_case_ : Optional[int] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: snake_case_ ,snake_case_ : Any = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: snake_case_ : List[Any] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: snake_case_ : Optional[int] = np.insert(_A , 0 , _A ) if sub_s[-1] != sep_id: snake_case_ : Optional[Any] = np.insert(_A , len(_A ) , _A ) assert len(_A ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(_A ) new_tok_ids.extend(_A ) new_lengths.extend([len(_A ) for l in sub_seqs] ) snake_case_ : Tuple = np.array(_A ) snake_case_ : int = np.array(_A ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: """simple docstring""" snake_case_ : Tuple = len(self ) snake_case_ : int = self.lengths > 11 snake_case_ : Dict = self.token_ids[indices] snake_case_ : int = self.lengths[indices] snake_case_ : List[Any] = len(self ) logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: snake_case_ : Optional[Any] = self.params.special_tok_ids['unk_token'] snake_case_ : Dict = len(self ) snake_case_ : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) snake_case_ : Any = (unk_occs / self.lengths) < 0.5 snake_case_ : List[Any] = self.token_ids[indices] snake_case_ : int = self.lengths[indices] snake_case_ : Tuple = len(self ) logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" if not self.params.is_master: return logger.info(F"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # 
logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCAmelCase_ ( self : Optional[int] , _A : Union[str, Any] ) -> List[Any]: """simple docstring""" snake_case_ : Any = [t[0] for t in batch] snake_case_ : int = [t[1] for t in batch] assert len(_A ) == len(_A ) # Max for paddings snake_case_ : str = max(_A ) # Pad token ids if self.params.mlm: snake_case_ : int = self.params.special_tok_ids['pad_token'] else: snake_case_ : Dict = self.params.special_tok_ids['unk_token'] snake_case_ : Dict = [list(t.astype(_A ) ) + [pad_idx] * (max_seq_len_ - len(_A )) for t in token_ids] assert len(tk_ ) == len(_A ) assert all(len(_A ) == max_seq_len_ for t in tk_ ) snake_case_ : Any = torch.tensor(tk_ ) # (bs, max_seq_len_) snake_case_ : Optional[Any] = torch.tensor(_A ) # (bs) return tk_t, lg_t
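# Usage sketch (hypothetical identifiers: the class and its collate method are
# shown with placeholder names above). The collate method pads every batch to
# its own longest sequence, so it is designed to be handed to a DataLoader:
#
#   from torch.utils.data import DataLoader
#
#   dataset = LmSeqsDataset(params=params, data=token_id_arrays)  # assumed name
#   loader = DataLoader(dataset, batch_size=32, shuffle=True,
#                       collate_fn=dataset.batch_sequences)       # assumed name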
327
1
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data to the [0, 1] range
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data to zero mean and unit variance
    return [round((x - mu) / sigma, ndigits) for x in data]
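# Quick sanity check for the two helpers above (values verified by hand:
# stdev([2, 4, 6]) == 2, so the z-scores are exactly -1, 0, 1):
if __name__ == "__main__":
    assert normalization([2, 4, 6]) == [0.0, 0.5, 1.0]
    assert standardization([2, 4, 6]) == [-1.0, 0.0, 1.0]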
327
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
327
1
import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets _SCREAMING_SNAKE_CASE = """\ @inproceedings{lin-2004-rouge, title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\", author = \"Lin, Chin-Yew\", booktitle = \"Text Summarization Branches Out\", month = jul, year = \"2004\", address = \"Barcelona, Spain\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W04-1013\", pages = \"74--81\", } """ _SCREAMING_SNAKE_CASE = """\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge """ _SCREAMING_SNAKE_CASE = """ Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring, `\"rougeL\"`: Longest common subsequence based scoring. `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. 
use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric('rouge') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] >>> print(results[\"rouge1\"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results[\"rouge1\"].mid.fmeasure) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE_ ( datasets.Metric ): def UpperCAmelCase_ ( self : str ) -> Optional[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[ 'https://en.wikipedia.org/wiki/ROUGE_(metric)', 'https://github.com/google-research/google-research/tree/master/rouge', ] , ) def UpperCAmelCase_ ( self : Optional[int] , _A : List[Any] , _A : List[Any] , _A : List[str]=None , _A : Optional[Any]=True , _A : Tuple=False ) -> int: """simple docstring""" if rouge_types is None: snake_case_ : Optional[int] = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] snake_case_ : Optional[int] = rouge_scorer.RougeScorer(rouge_types=_A , use_stemmer=_A ) if use_aggregator: snake_case_ : Optional[int] = scoring.BootstrapAggregator() else: snake_case_ : int = [] for ref, pred in zip(_A , _A ): snake_case_ : Optional[Any] = scorer.score(_A , _A ) if use_aggregator: aggregator.add_scores(_A ) else: scores.append(_A ) if use_aggregator: snake_case_ : List[Any] = aggregator.aggregate() else: snake_case_ : Optional[Any] = {} for key in scores[0]: snake_case_ : List[Any] = [score[key] for score in scores] return result
327
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _SCREAMING_SNAKE_CASE = get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ): os.makedirs(__a , exist_ok=__a ) with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : Dict = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" snake_case_ : Dict = os.path.join(__a , __a ) if accelerator.process_index == 0: logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__a , __a ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Dict = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) snake_case_ : Dict = os.path.join(__a , __a ) logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__a , __a ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : Optional[int] = os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" ) os.makedirs(__a , exist_ok=__a ) logger.info(f"""Saving model to {ckpt_dir}""" ) snake_case_ : int = {'model': state_dict} dist_cp.save_state_dict( state_dict=__a , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , ) logger.info(f"""Model saved to {ckpt_dir}""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__a ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" snake_case_ : Optional[Any] = os.path.join(__a , __a ) logger.info(f"""Loading model from {input_model_file}""" ) snake_case_ : Optional[Any] = torch.load(__a ) logger.info(f"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Optional[Any] = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) snake_case_ : Tuple = os.path.join(__a , __a ) logger.info(f"""Loading model from {input_model_file}""" ) snake_case_ : Optional[int] = torch.load(__a ) logger.info(f"""Model loaded from {input_model_file}""" ) 
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : Tuple = ( os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" ) if f"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading model from {ckpt_dir}""" ) snake_case_ : List[Any] = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=__a , storage_reader=dist_cp.FileSystemReader(__a ) , planner=DefaultLoadPlanner() , ) snake_case_ : Any = state_dict['model'] logger.info(f"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(__a ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ): os.makedirs(__a , exist_ok=__a ) with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : List[str] = FSDP.optim_state_dict(__a , __a ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: snake_case_ : str = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) snake_case_ : Any = os.path.join(__a , __a ) logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(__a , __a ) logger.info(f"""Optimizer state saved in {output_optimizer_file}""" ) else: snake_case_ : Optional[int] = os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(__a , exist_ok=__a ) logger.info(f"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , ) logger.info(f"""Optimizer state saved in {ckpt_dir}""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : Optional[Any] = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: snake_case_ : Union[str, Any] = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) snake_case_ : List[Any] = os.path.join(__a , __a ) logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" ) snake_case_ : Optional[int] = torch.load(__a ) logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" ) else: snake_case_ : str = ( os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if f"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading Optimizer from {ckpt_dir}""" ) snake_case_ : Any = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__a ) , ) snake_case_ : Optional[int] = optim_state['optimizer'] logger.info(f"""Optimizer loaded from {ckpt_dir}""" ) snake_case_ : Optional[Any] = FSDP.optim_state_dict_to_load(__a , __a , __a ) optimizer.load_state_dict(__a )
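# Usage sketch. All four helpers above lost their def names in this dump, so
# the names below (a save/load pair for the model and one for the optimizer)
# are hypothetical stand-ins, and the argument order shown is an assumption;
# from an Accelerate checkpointing hook one would call roughly:
#
#   save_fsdp_model(fsdp_plugin, accelerator, model, output_dir)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir)
#   load_fsdp_model(fsdp_plugin, accelerator, model, input_dir)
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir)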
327
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available _SCREAMING_SNAKE_CASE = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
327
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : int = 32 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , _A : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , _A : bool = True , _A : Tuple=7 , _A : Tuple=30 , _A : int=400 , _A : Tuple=3 , ) -> Optional[int]: """simple docstring""" snake_case_ : str = parent snake_case_ : str = do_resize snake_case_ : str = size if size is not None else {'shortest_edge': 288} snake_case_ : Any = size_divisor snake_case_ : Any = do_rescale snake_case_ : Union[str, Any] = rescale_factor snake_case_ : str = do_normalize snake_case_ : int = do_center_crop snake_case_ : str = image_mean snake_case_ : int = image_std snake_case_ : Any = do_pad snake_case_ : Optional[int] = batch_size snake_case_ : List[str] = num_channels snake_case_ : Any = min_resolution snake_case_ : str = max_resolution def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def UpperCAmelCase_ ( self : Dict , _A : str , _A : Union[str, Any]=False ) -> int: """simple docstring""" if not batched: snake_case_ : Optional[int] = self.size['shortest_edge'] snake_case_ : List[Any] = image_inputs[0] if isinstance(_A , Image.Image ): snake_case_ ,snake_case_ : Optional[Any] = image.size else: snake_case_ ,snake_case_ : str = image.shape[1], image.shape[2] snake_case_ : Dict = size / min(_A , _A ) if h < w: snake_case_ ,snake_case_ : str = size, scale * w else: snake_case_ ,snake_case_ : Tuple = scale * h, size snake_case_ : Dict = int((1333 / 800) * size ) if max(_A , _A ) > max_size: snake_case_ : Union[str, Any] = max_size / max(_A , _A ) snake_case_ : Any = newh * scale snake_case_ : Union[str, Any] = neww * scale snake_case_ ,snake_case_ : Any = int(newh + 0.5 ), int(neww + 0.5 ) snake_case_ ,snake_case_ : int = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: snake_case_ : Optional[int] = [] for image in image_inputs: snake_case_ ,snake_case_ : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case_ : str = max(_A , key=lambda _A : item[0] )[0] snake_case_ : List[str] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ): __magic_name__: List[Any] = BridgeTowerImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: """simple docstring""" snake_case_ : int = BridgeTowerImageProcessingTester(self ) @property def UpperCAmelCase_ ( self : int ) -> Any: """simple 
docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , 'image_mean' ) ) self.assertTrue(hasattr(_A , 'image_std' ) ) self.assertTrue(hasattr(_A , 'do_normalize' ) ) self.assertTrue(hasattr(_A , 'do_resize' ) ) self.assertTrue(hasattr(_A , 'size' ) ) self.assertTrue(hasattr(_A , 'size_divisor' ) ) def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]: """simple docstring""" pass def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : List[str] = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Tuple = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : Any = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : str = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : 
Tuple = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
327
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ): __magic_name__: int = KandinskyInpaintPipeline __magic_name__: List[str] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"] __magic_name__: str = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", "mask_image", ] __magic_name__: str = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __magic_name__: Optional[Any] = False @property def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return 32 @property def UpperCAmelCase_ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return 32 @property def UpperCAmelCase_ ( self : Tuple ) -> Dict: """simple docstring""" return self.time_input_dim @property def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return 100 @property def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" snake_case_ : List[Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) snake_case_ : Optional[int] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) snake_case_ : Union[str, Any] = MultilingualCLIP(_A ) snake_case_ : Tuple = text_encoder.eval() return text_encoder @property def UpperCAmelCase_ ( self : Optional[Any] ) -> str: """simple docstring""" torch.manual_seed(0 ) snake_case_ : Tuple = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } snake_case_ : Dict = UNetaDConditionModel(**_A ) return model @property def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]: """simple docstring""" return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", 
"AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0 ) snake_case_ : str = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" snake_case_ : List[str] = self.dummy_text_encoder snake_case_ : List[str] = self.dummy_tokenizer snake_case_ : Any = self.dummy_unet snake_case_ : Optional[Any] = self.dummy_movq snake_case_ : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=_A , set_alpha_to_one=_A , steps_offset=1 , prediction_type='epsilon' , thresholding=_A , ) snake_case_ : Dict = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def UpperCAmelCase_ ( self : int , _A : Optional[Any] , _A : Optional[int]=0 ) -> Any: """simple docstring""" snake_case_ : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_A ) ).to(_A ) snake_case_ : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_A ) # create init_image snake_case_ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(_A ) ).to(_A ) snake_case_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case_ : Any = Image.fromarray(np.uinta(_A ) ).convert('RGB' ).resize((256, 256) ) # create mask snake_case_ : Optional[int] = np.ones((64, 64) , dtype=np.floataa ) snake_case_ : List[Any] = 0 if str(_A ).startswith('mps' ): snake_case_ : List[Any] = torch.manual_seed(_A ) else: snake_case_ : int = torch.Generator(device=_A ).manual_seed(_A ) snake_case_ : Optional[Any] = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def UpperCAmelCase_ ( self : Any ) -> Optional[int]: """simple docstring""" snake_case_ : Union[str, Any] = 'cpu' snake_case_ : List[str] = self.get_dummy_components() snake_case_ : List[Any] = self.pipeline_class(**_A ) snake_case_ : Union[str, Any] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) snake_case_ : Optional[Any] = pipe(**self.get_dummy_inputs(_A ) ) snake_case_ : Union[str, Any] = output.images snake_case_ : Any = pipe( **self.get_dummy_inputs(_A ) , return_dict=_A , )[0] snake_case_ : List[str] = image[0, -3:, -3:, -1] snake_case_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) snake_case_ : Dict = np.array( [0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def UpperCAmelCase_ ( self : int ) -> List[str]: """simple docstring""" 
super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : Any ) -> str: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict: """simple docstring""" snake_case_ : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' ) snake_case_ : List[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) snake_case_ : Dict = np.ones((768, 768) , dtype=np.floataa ) snake_case_ : List[str] = 0 snake_case_ : List[Any] = 'a hat' snake_case_ : Optional[Any] = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa ) pipe_prior.to(_A ) snake_case_ : Optional[Any] = KandinskyInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa ) snake_case_ : List[str] = pipeline.to(_A ) pipeline.set_progress_bar_config(disable=_A ) snake_case_ : Dict = torch.Generator(device='cpu' ).manual_seed(0 ) snake_case_ ,snake_case_ : Union[str, Any] = pipe_prior( _A , generator=_A , num_inference_steps=5 , negative_prompt='' , ).to_tuple() snake_case_ : List[str] = pipeline( _A , image=_A , mask_image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) snake_case_ : Optional[int] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_A , _A )
327
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        # The timing keys below are descriptive labels for each configuration.
        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

        with open(RESULTS_FILE_PATH, "wb") as f:
            f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
327
1
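# A minimal sketch of the `get_duration` decorator the benchmark above imports
# from its local `utils` module. The real helper is not shown in this file, so
# this implementation is an assumption: it simply returns the wall-clock time
# of the wrapped call, which is what the `times` dict above expects.
import functools
import timeit


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()  # wall-clock start
        func(*args, **kwargs)  # result is discarded; only the timing matters
        return timeit.default_timer() - start  # elapsed seconds

    return wrapper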
import argparse

import torch

from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--openai_checkpoint_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the TensorFlow checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--openai_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_openai_checkpoint_to_pytorch(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
327
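# Hypothetical invocation of the conversion script above; the script filename
# and checkpoint path are illustrative placeholders, while the flag names come
# straight from the argparse definition:
#
#   python convert_openai_checkpoint.py \
#       --openai_checkpoint_folder_path ./openai-gpt-checkpoint \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch
#
# The same conversion can be run programmatically:
#
#   convert_openai_checkpoint_to_pytorch("./openai-gpt-checkpoint", "", "./openai-gpt-pytorch")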
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
327
1
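# Usage sketch for `covid_stats` above. It needs network access, and the xpath
# only stays valid while worldometers keeps the `maincounter-number` divs, so
# treat this as illustrative rather than a stable API.
stats = covid_stats()
print(f"cases={stats.cases}, deaths={stats.deaths}, recovered={stats.recovered}")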
def twos_complement(number: int) -> str:
    """Return the two's complement binary string of a non-positive integer."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
327
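# Worked example for `twos_complement` above: |-5| has bit length 3, so the
# result uses 4 bits and equals 2**4 - 5 = 11 = 0b1011.
assert twos_complement(-5) == "0b1011"
assert twos_complement(0) == "0b0"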
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
327
1
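# Usage sketch for the fast LXMERT tokenizer defined above; it downloads the
# published vocab from the Hub, so it needs network access on first use.
from transformers import LxmertTokenizerFast

tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
print(tok("Who is eating the apple?")["input_ids"])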
import pickle import numpy as np from matplotlib import pyplot as plt class SCREAMING_SNAKE_CASE_ : def __init__( self : Optional[int] , _A : List[str] , _A : int , _A : Any , _A : str , _A : Optional[int] , _A : Optional[Any]=0.2 , _A : Optional[Any]=0.2 ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = bp_numa snake_case_ : List[str] = bp_numa snake_case_ : Dict = bp_numa snake_case_ : int = conva_get[:2] snake_case_ : Union[str, Any] = conva_get[2] snake_case_ : Optional[Any] = size_pa snake_case_ : List[Any] = rate_w snake_case_ : Optional[int] = rate_t snake_case_ : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] snake_case_ : str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) snake_case_ : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) snake_case_ : Any = -2 * np.random.rand(self.conva[1] ) + 1 snake_case_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 snake_case_ : str = -2 * np.random.rand(self.num_bpa ) + 1 def UpperCAmelCase_ ( self : Any , _A : List[str] ) -> Tuple: """simple docstring""" snake_case_ : Dict = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(_A , 'wb' ) as f: pickle.dump(_A , _A ) print(F"""Model saved: {save_path}""" ) @classmethod def UpperCAmelCase_ ( cls : Dict , _A : List[str] ) -> Optional[int]: """simple docstring""" with open(_A , 'rb' ) as f: snake_case_ : Tuple = pickle.load(_A ) # noqa: S301 snake_case_ : str = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) snake_case_ : int = model_dic.get('size_pooling1' ) snake_case_ : Tuple = model_dic.get('num_bp1' ) snake_case_ : List[str] = model_dic.get('num_bp2' ) snake_case_ : Tuple = model_dic.get('num_bp3' ) snake_case_ : List[Any] = model_dic.get('rate_weight' ) snake_case_ : Any = model_dic.get('rate_thre' ) # create model instance snake_case_ : Any = CNN(_A , _A , _A , _A , _A , _A , _A ) # modify model parameter snake_case_ : Any = model_dic.get('w_conv1' ) snake_case_ : Optional[int] = model_dic.get('wkj' ) snake_case_ : Any = model_dic.get('vji' ) snake_case_ : List[Any] = model_dic.get('thre_conv1' ) snake_case_ : List[Any] = model_dic.get('thre_bp2' ) snake_case_ : Dict = model_dic.get('thre_bp3' ) return conv_ins def UpperCAmelCase_ ( self : List[Any] , _A : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return 1 / (1 + np.exp(-1 * x )) def UpperCAmelCase_ ( self : Optional[int] , _A : List[Any] ) -> Dict: """simple docstring""" return round(_A , 3 ) def UpperCAmelCase_ ( self : Optional[int] , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any] , _A : Tuple ) -> int: """simple docstring""" snake_case_ : int = convs[0] snake_case_ : Dict = convs[1] snake_case_ : Any = np.shape(_A )[0] # get the data slice of original image data, data_focus snake_case_ : Dict = [] for i_focus in range(0 , size_data - size_conv + 1 , _A ): for j_focus in range(0 , size_data - size_conv + 1 , _A ): snake_case_ : Tuple = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(_A ) # calculate the feature map of every single kernel, and saved as list of matrix 
snake_case_ : str = [] snake_case_ : List[str] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(_A ): snake_case_ : Union[str, Any] = [] for i_focus in range(len(_A ) ): snake_case_ : List[Any] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(_A ) ) snake_case_ : Any = np.asmatrix(_A ).reshape( _A , _A ) data_featuremap.append(_A ) # expanding the data slice to One dimenssion snake_case_ : str = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(_A ) ) snake_case_ : Optional[int] = np.asarray(_A ) return focus_list, data_featuremap def UpperCAmelCase_ ( self : Dict , _A : Tuple , _A : List[Any] , _A : Any="average_pool" ) -> Optional[Any]: """simple docstring""" snake_case_ : Any = len(featuremaps[0] ) snake_case_ : Dict = int(size_map / size_pooling ) snake_case_ : str = [] for i_map in range(len(_A ) ): snake_case_ : List[str] = featuremaps[i_map] snake_case_ : int = [] for i_focus in range(0 , _A , _A ): for j_focus in range(0 , _A , _A ): snake_case_ : Optional[Any] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(_A ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(_A ) ) snake_case_ : Tuple = np.asmatrix(_A ).reshape(_A , _A ) featuremap_pooled.append(_A ) return featuremap_pooled def UpperCAmelCase_ ( self : Tuple , _A : List[Any] ) -> List[Any]: """simple docstring""" snake_case_ : Any = [] for i in range(len(_A ) ): snake_case_ : Optional[int] = np.shape(data[i] ) snake_case_ : Union[str, Any] = data[i].reshape(1 , shapes[0] * shapes[1] ) snake_case_ : Union[str, Any] = data_listed.getA().tolist()[0] data_expanded.extend(_A ) snake_case_ : int = np.asarray(_A ) return data_expanded def UpperCAmelCase_ ( self : Union[str, Any] , _A : Optional[int] ) -> Tuple: """simple docstring""" snake_case_ : int = np.asarray(_A ) snake_case_ : Dict = np.shape(_A ) snake_case_ : Dict = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def UpperCAmelCase_ ( self : Dict , _A : Optional[Any] , _A : Union[str, Any] , _A : List[str] , _A : Tuple , _A : Dict ) -> Optional[int]: """simple docstring""" snake_case_ : str = [] snake_case_ : Dict = 0 for i_map in range(_A ): snake_case_ : List[Any] = np.ones((size_map, size_map) ) for i in range(0 , _A , _A ): for j in range(0 , _A , _A ): snake_case_ : Tuple = pd_pool[ i_pool ] snake_case_ : Tuple = i_pool + 1 snake_case_ : Any = np.multiply( _A , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(_A ) return pd_all def UpperCAmelCase_ ( self : Optional[Any] , _A : Optional[Any] , _A : Optional[Any] , _A : str , _A : List[str] , _A : int , _A : Optional[int]=bool ) -> Union[str, Any]: """simple docstring""" print('----------------------Start Training-------------------------' ) print((' - - Shape: Train_Data ', np.shape(_A )) ) print((' - - Shape: Teach_Data ', np.shape(_A )) ) snake_case_ : Union[str, Any] = 0 snake_case_ : Optional[Any] = [] snake_case_ : Optional[Any] = 10000 while rp < n_repeat and mse >= error_accuracy: snake_case_ : Dict = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(_A ) ): # print('------------Learning Image: %d--------------'%p) snake_case_ : List[str] = np.asmatrix(datas_train[p] ) snake_case_ : Optional[int] = np.asarray(datas_teach[p] ) snake_case_ ,snake_case_ : List[str] = self.convolute( _A , self.conva , 
self.w_conva , self.thre_conva , conv_step=self.step_conva , ) snake_case_ : Union[str, Any] = self.pooling(_A , self.size_poolinga ) snake_case_ : str = np.shape(_A ) snake_case_ : Optional[int] = self._expand(_A ) snake_case_ : Dict = data_bp_input snake_case_ : Tuple = np.dot(_A , self.vji.T ) - self.thre_bpa snake_case_ : Union[str, Any] = self.sig(_A ) snake_case_ : List[str] = np.dot(_A , self.wkj.T ) - self.thre_bpa snake_case_ : Union[str, Any] = self.sig(_A ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- snake_case_ : List[str] = np.multiply( (data_teach - bp_outa) , np.multiply(_A , (1 - bp_outa) ) ) snake_case_ : List[Any] = np.multiply( np.dot(_A , self.wkj ) , np.multiply(_A , (1 - bp_outa) ) ) snake_case_ : int = np.dot(_A , self.vji ) snake_case_ : Any = pd_i_all / (self.size_poolinga * self.size_poolinga) snake_case_ : List[str] = pd_conva_pooled.T.getA().tolist() snake_case_ : int = self._calculate_gradient_from_pool( _A , _A , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): snake_case_ : Any = self._expand_mat(pd_conva_all[k_conv] ) snake_case_ : Union[str, Any] = self.rate_weight * np.dot(_A , _A ) snake_case_ : int = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) snake_case_ : str = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer snake_case_ : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight snake_case_ : Tuple = self.vji + pd_j_all.T * bp_outa * self.rate_weight snake_case_ : Tuple = self.thre_bpa - pd_k_all * self.rate_thre snake_case_ : Dict = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image snake_case_ : Dict = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) snake_case_ : Optional[Any] = rp + 1 snake_case_ : List[Any] = error_count / patterns all_mse.append(_A ) def draw_error(): snake_case_ : List[Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(_A , '+-' ) plt.plot(_A , 'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(_A , alpha=0.5 ) plt.show() print('------------------Training Complished---------------------' ) print((' - - Training epoch: ', rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def UpperCAmelCase_ ( self : int , _A : str ) -> List[str]: """simple docstring""" snake_case_ : Optional[int] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(_A )) ) for p in range(len(_A ) ): snake_case_ : str = np.asmatrix(datas_test[p] ) snake_case_ ,snake_case_ : Union[str, Any] = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) snake_case_ : Optional[Any] = self.pooling(_A , self.size_poolinga ) snake_case_ : Union[str, Any] = self._expand(_A ) snake_case_ : int = data_bp_input snake_case_ : Any = bp_outa * self.vji.T - self.thre_bpa snake_case_ : Optional[Any] = self.sig(_A ) snake_case_ : Tuple = bp_outa * self.wkj.T - self.thre_bpa snake_case_ : List[str] = self.sig(_A ) produce_out.extend(bp_outa.getA().tolist() ) snake_case_ : Union[str, Any] = [list(map(self.do_round , _A ) ) for each in produce_out] return np.asarray(_A ) def UpperCAmelCase_ ( self : Any , _A : List[Any] ) -> Optional[int]: """simple 
docstring""" snake_case_ : int = np.asmatrix(_A ) snake_case_ ,snake_case_ : Any = self.convolute( _A , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) snake_case_ : List[str] = self.pooling(_A , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
327
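# A hypothetical round-trip with the convolutional network above. Every method
# and keyword name below is an assumption reconstructed from the method bodies
# (the source collapses all of them), and the constructor order follows the
# classmethod that rebuilds a model from a pickle:
# (conv_spec, pooling_size, bp_num1, bp_num2, bp_num3, rate_w, rate_t).
#
#   cnn = CNN([7, 4, 2], 2, 392, 20, 2, 0.5, 0.5)
#   cnn.train(patterns, datas_train, datas_teach, n_repeat=100, error_accuracy=0.05, draw_e=True)
#   cnn.save_model("model.pkl")
#   restored = CNN.read_model("model.pkl")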
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself, e.g. 5 -> 25."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
327
1
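# Quick checks for the automorphic-number test above: a number is automorphic
# when its square ends in the number itself (5**2 = 25, 76**2 = 5776,
# 9376**2 = 87909376).
assert is_automorphic_number(5) is True
assert is_automorphic_number(76) is True
assert is_automorphic_number(9376) is True
assert is_automorphic_number(7) is False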
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState


warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
327
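# A minimal sketch wiring the scheduler wrapper above around a plain PyTorch
# scheduler. In real code the wrapping happens inside `accelerator.prepare`;
# the import path below assumes the class ships as accelerate.scheduler.
import torch
from accelerate.scheduler import AcceleratedScheduler

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.SGD([param], lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)

scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=True)
# `scheduler.step()` expects Accelerate-wrapped optimizers (it checks
# `step_was_skipped`), so with a raw SGD instance only the passthroughs are
# safe to call:
print(scheduler.state_dict())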
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
327
1
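# The Autoformer __init__ above relies on the lazy-import pattern: heavy
# submodules are only imported when an attribute is first touched. A
# stripped-down sketch of the idea (not the real transformers._LazyModule):
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve submodule attributes on first access instead of at import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)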
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Optional[int] = (DPMSolverSDEScheduler,) __magic_name__: Optional[Any] = 10 def UpperCAmelCase_ ( self : Optional[int] , **_A : Any ) -> List[str]: """simple docstring""" snake_case_ : Dict = { 'num_train_timesteps': 1100, 'beta_start': 0.0_0_0_1, 'beta_end': 0.0_2, 'beta_schedule': 'linear', 'noise_sampler_seed': 0, } config.update(**_A ) return config def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_A ) def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=_A , beta_end=_A ) def UpperCAmelCase_ ( self : Dict ) -> List[Any]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_A ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_A ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: """simple docstring""" snake_case_ : Optional[Any] = self.scheduler_classes[0] snake_case_ : Tuple = self.get_scheduler_config() snake_case_ : str = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) snake_case_ : int = self.dummy_model() snake_case_ : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case_ : Dict = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): snake_case_ : List[str] = scheduler.scale_model_input(_A , _A ) snake_case_ : Dict = model(_A , _A ) snake_case_ : Optional[int] = scheduler.step(_A , _A , _A ) snake_case_ : List[str] = output.prev_sample snake_case_ : List[Any] = torch.sum(torch.abs(_A ) ) snake_case_ : List[Any] = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6 ) < 1E-2 assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1E-3 else: assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3 def UpperCAmelCase_ ( self : Tuple ) -> Dict: """simple docstring""" snake_case_ : Dict = self.scheduler_classes[0] snake_case_ : Optional[int] = self.get_scheduler_config(prediction_type='v_prediction' ) snake_case_ : List[str] = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps ) snake_case_ : Tuple = self.dummy_model() snake_case_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma snake_case_ : List[Any] = sample.to(_A ) for i, t in enumerate(scheduler.timesteps ): snake_case_ : Any = scheduler.scale_model_input(_A , _A ) snake_case_ : List[str] = model(_A , _A ) snake_case_ : Optional[Any] = scheduler.step(_A , _A , _A ) snake_case_ : Dict = output.prev_sample snake_case_ : int = torch.sum(torch.abs(_A ) ) snake_case_ : Tuple = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 
1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3 ) < 1E-2 assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3 ) < 1E-2 assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1E-3 else: assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5 ) < 1E-2 assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1E-3 def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]: """simple docstring""" snake_case_ : Union[str, Any] = self.scheduler_classes[0] snake_case_ : str = self.get_scheduler_config() snake_case_ : Any = scheduler_class(**_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) snake_case_ : List[str] = self.dummy_model() snake_case_ : List[str] = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma for t in scheduler.timesteps: snake_case_ : Optional[int] = scheduler.scale_model_input(_A , _A ) snake_case_ : Any = model(_A , _A ) snake_case_ : Dict = scheduler.step(_A , _A , _A ) snake_case_ : Any = output.prev_sample snake_case_ : Optional[int] = torch.sum(torch.abs(_A ) ) snake_case_ : Tuple = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1E-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1E-3 else: assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1E-3 def UpperCAmelCase_ ( self : int ) -> str: """simple docstring""" snake_case_ : int = self.scheduler_classes[0] snake_case_ : List[Any] = self.get_scheduler_config() snake_case_ : List[str] = scheduler_class(**_A , use_karras_sigmas=_A ) scheduler.set_timesteps(self.num_inference_steps , device=_A ) snake_case_ : Dict = self.dummy_model() snake_case_ : Optional[int] = self.dummy_sample_deter.to(_A ) * scheduler.init_noise_sigma snake_case_ : List[Any] = sample.to(_A ) for t in scheduler.timesteps: snake_case_ : Dict = scheduler.scale_model_input(_A , _A ) snake_case_ : List[str] = model(_A , _A ) snake_case_ : List[str] = scheduler.step(_A , _A , _A ) snake_case_ : Optional[int] = output.prev_sample snake_case_ : Optional[int] = torch.sum(torch.abs(_A ) ) snake_case_ : int = torch.mean(torch.abs(_A ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8 ) < 1E-2 assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5 ) < 1E-2 assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2 else: assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2 ) < 1E-2 assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1E-2
327
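# A short denoising-loop sketch exercising the scheduler tested above; the
# zero tensor stands in for a real noise-prediction model and is an
# assumption, not part of the test file. Requires the `torchsde` extra.
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, beta_schedule="linear", noise_sampler_seed=0)
scheduler.set_timesteps(10)

torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # placeholder for model(model_input, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample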
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor (logits or last_hidden_state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
327
1
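# Typical entry point for the feature-extraction pipeline above; the
# checkpoint is illustrative and any encoder-style checkpoint works.
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Transformers is great!")  # nested list: [tokens][hidden_dim]
print(len(features[0]), len(features[0][0]))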
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = '''bloom''' __snake_case = ['''past_key_values'''] __snake_case = { '''num_hidden_layers''': '''n_layer''', '''num_attention_heads''': '''n_head''', } def __init__( self : Optional[Any] , __UpperCAmelCase : int=250_880 , __UpperCAmelCase : Tuple=64 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : str=8 , __UpperCAmelCase : Union[str, Any]=1e-5 , __UpperCAmelCase : Union[str, Any]=0.02 , __UpperCAmelCase : str=True , __UpperCAmelCase : List[str]=1 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Union[str, Any]=0.0 , __UpperCAmelCase : int=0.0 , __UpperCAmelCase : str=1 , __UpperCAmelCase : Optional[Any]=False , **__UpperCAmelCase : Any , ) ->Any: """simple docstring""" a = vocab_size # Backward compatibility with n_embed kwarg a = kwargs.pop('''n_embed''' , __UpperCAmelCase ) a = hidden_size if n_embed is None else n_embed a = n_layer a = n_head a = layer_norm_epsilon a = initializer_range a = use_cache a = pretraining_tp a = apply_residual_connection_post_layernorm a = hidden_dropout a = attention_dropout a = bos_token_id a = eos_token_id a = slow_but_exact super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) class lowercase_ ( lowercase ): '''simple docstring''' __snake_case = version.parse('''1.12''' ) def __init__( self : Optional[int] , __UpperCAmelCase : PretrainedConfig , __UpperCAmelCase : str = "default" , __UpperCAmelCase : List[PatchingSpec] = None , __UpperCAmelCase : bool = False , ) ->List[str]: """simple docstring""" super().__init__(__UpperCAmelCase , task=__UpperCAmelCase , patching_specs=__UpperCAmelCase , use_past=__UpperCAmelCase ) if not getattr(self._config , '''pad_token_id''' , __UpperCAmelCase ): # TODO: how to do that better? a = 0 @property def __lowerCAmelCase ( self : Dict ) ->Mapping[str, Mapping[int, str]]: """simple docstring""" a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' , inverted_values_shape=__UpperCAmelCase ) a = {0: '''batch''', 1: '''past_sequence + sequence'''} else: a = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self : Any ) ->int: """simple docstring""" return self._config.n_layer @property def __lowerCAmelCase ( self : int ) ->int: """simple docstring""" return self._config.n_head @property def __lowerCAmelCase ( self : Any ) ->float: """simple docstring""" return 1e-3 def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : "PreTrainedTokenizer" , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional["TensorType"] = None , ) ->Mapping[str, Any]: """simple docstring""" a = super(__UpperCAmelCase , self ).generate_dummy_inputs( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) # We need to order the input in the way they appears in the forward() a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch a , a = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values a = seqlen + 2 a = self._config.hidden_size // self.num_attention_heads a = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) a = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) a = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers ) ] a = common_inputs['''attention_mask'''] if self.use_past: a = ordered_inputs['''attention_mask'''].dtype a = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self : Tuple ) ->int: """simple docstring""" return 13
0
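# Minimal usage of the configuration class above; the sizes mirror tiny test
# configs and are illustrative only. Instantiating the model allocates random
# weights, so no download happens.
from transformers import BloomConfig, BloomModel

config = BloomConfig(vocab_size=1024, hidden_size=64, n_layer=2, n_head=8)
model = BloomModel(config)
print(model.config.hidden_size)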
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
327
0
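# Sanity check for `is_substring_divisible` above, using the pandigital number
# from the Project Euler 43 statement: in 1406357289, d2d3d4 = 406 is divisible
# by 2, d3d4d5 = 063 by 3, d4d5d6 = 635 by 5, d5d6d7 = 357 by 7, d6d7d8 = 572
# by 11, d7d8d9 = 728 by 13 and d8d9d10 = 289 by 17.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))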