Dataset schema — one row per example:
- code: string (82 to 54.1k characters)
- code_codestyle: int64 (0 to 699)
- style_context: string (111 to 35.6k characters)
- style_context_codestyle: int64 (0 to 699)
- label: int64 (0 or 1)
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ : Tuple =logging.get_logger(__name__) lowerCAmelCase__ : Optional[int] ={ 'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json', } class __lowercase (__SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCAmelCase = """git_vision_model""" def __init__( self , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3 , lowerCAmelCase__=2_2_4 , lowerCAmelCase__=1_6 , lowerCAmelCase__="quick_gelu" , lowerCAmelCase__=1E-5 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , **lowerCAmelCase__ , ): """simple docstring""" super().__init__(**lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size SCREAMING_SNAKE_CASE_ : Optional[int] = intermediate_size SCREAMING_SNAKE_CASE_ : Any = num_hidden_layers SCREAMING_SNAKE_CASE_ : Dict = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = num_channels SCREAMING_SNAKE_CASE_ : Tuple = patch_size SCREAMING_SNAKE_CASE_ : Any = image_size SCREAMING_SNAKE_CASE_ : Dict = initializer_range SCREAMING_SNAKE_CASE_ : List[str] = attention_dropout SCREAMING_SNAKE_CASE_ : Any = layer_norm_eps SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_act @classmethod def UpperCamelCase__ ( cls , lowerCAmelCase__ , **lowerCAmelCase__ ): """simple docstring""" cls._set_token_in_kwargs(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) # get the vision config dict if we are loading from GITConfig if config_dict.get('model_type' ) == "git": SCREAMING_SNAKE_CASE_ : Union[str, Any] = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ ) class __lowercase (__SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCAmelCase = """git""" def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=3_0_5_2_2 , lowerCAmelCase__=7_6_8 , lowerCAmelCase__=6 , lowerCAmelCase__=1_2 , lowerCAmelCase__=3_0_7_2 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-12 , lowerCAmelCase__=0 , lowerCAmelCase__="absolute" , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=1_0_1 , lowerCAmelCase__=1_0_2 , lowerCAmelCase__=None , **lowerCAmelCase__ , ): """simple docstring""" super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) if vision_config is None: SCREAMING_SNAKE_CASE_ : Dict = {} logger.info('vision_config is None. initializing the GitVisionConfig with default values.' 
) SCREAMING_SNAKE_CASE_ : str = GitVisionConfig(**lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Dict = vocab_size SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers SCREAMING_SNAKE_CASE_ : Any = num_attention_heads SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_act SCREAMING_SNAKE_CASE_ : Any = intermediate_size SCREAMING_SNAKE_CASE_ : Tuple = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : int = initializer_range SCREAMING_SNAKE_CASE_ : int = layer_norm_eps SCREAMING_SNAKE_CASE_ : List[str] = position_embedding_type SCREAMING_SNAKE_CASE_ : List[str] = use_cache SCREAMING_SNAKE_CASE_ : Any = tie_word_embeddings SCREAMING_SNAKE_CASE_ : Dict = num_image_with_embedding SCREAMING_SNAKE_CASE_ : List[str] = bos_token_id SCREAMING_SNAKE_CASE_ : str = eos_token_id def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE_ : str = self.vision_config.to_dict() SCREAMING_SNAKE_CASE_ : Optional[int] = self.__class__.model_type return output
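For orientation, the code column above holds the Hugging Face GIT configuration classes (a GitVisionConfig nested inside a GitConfig) with obfuscated identifiers. A minimal usage sketch against the public transformers API, assuming a recent release that ships the GIT model:

```python
from transformers import GitConfig, GitVisionConfig

# Build a vision tower config, then a full GIT config that embeds it;
# GitConfig re-instantiates the dict as a GitVisionConfig internally.
vision_config = GitVisionConfig(hidden_size=768, num_hidden_layers=12)
config = GitConfig(vision_config=vision_config.to_dict(), vocab_size=30522)

print(config.model_type)                # "git"
print(config.vision_config.patch_size)  # 16 (the default seen above)
```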
101
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
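The style_context column for this row is a one-line Python quine. The trick is that %r splices the template's own repr back into itself while %% collapses to a literal %; a stripped-down demonstration of the same substitution:

```python
# The template contains %r where its own repr is spliced in, and %%
# where a literal % must survive the substitution.
template = 'print((lambda quine: quine %% quine)(%r))'
print(template % template)
# -> print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
```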
47
0
"""simple docstring""" from itertools import permutations def UpperCamelCase (SCREAMING_SNAKE_CASE ): if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False UpperCamelCase : Optional[int] = [7, 11, 13, 17] for i, test in enumerate(SCREAMING_SNAKE_CASE ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def UpperCamelCase (SCREAMING_SNAKE_CASE = 10 ): return sum( int("""""".join(map(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ) for num in permutations(range(SCREAMING_SNAKE_CASE ) ) if is_substring_divisible(SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": print(f'''{solution() = }''')
102
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class _UpperCamelCase( __lowerCamelCase ): def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' __a : List[Any] = tempfile.mkdtemp() __a : int = 8 # DPR tok __a : Dict = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __a : int = os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok __a : str = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __a : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __a : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __a : List[str] = {'unk_token': '<unk>'} __a : Dict = os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['vocab_file'] ) __a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) ) def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def __lowerCAmelCase ( self : str ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) @require_tokenizers def __lowerCAmelCase ( self : Dict ): '''simple docstring''' __a : Tuple = os.path.join(self.tmpdirname , 'rag_tokenizer' ) __a : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) __a : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(SCREAMING_SNAKE_CASE__ ) rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) __a : List[Any] = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , 
config=SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE__ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE__ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' __a : Optional[Any] = RagTokenizer.from_pretrained('facebook/rag-token-nq' ) __a : List[Any] = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __a : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @slow def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' __a : Any = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' ) __a : Union[str, Any] = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __a : str = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
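The test above exercises RagTokenizer, which bundles a DPR question-encoder tokenizer with a BART generator tokenizer. A short sketch using the same public checkpoint the slow tests load:

```python
from transformers import RagTokenizer

# Calling the tokenizer delegates to the question-encoder tokenizer by default.
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
batch = tokenizer(["who got the first nobel prize in physics"])
print(list(batch.keys()))  # e.g. ['input_ids', 'attention_mask']
```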
47
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy snake_case = logging.get_logger(__name__) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : str , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : float , **__lowerCamelCase : str ): """simple docstring""" _snake_case = feature_size _snake_case = sampling_rate _snake_case = padding_value _snake_case = kwargs.pop('''padding_side''' , '''right''' ) _snake_case = kwargs.pop('''return_attention_mask''' , __lowerCamelCase ) super().__init__(**__lowerCamelCase ) def __UpperCAmelCase ( self : Dict , __lowerCamelCase : Union[ BatchFeature, List[BatchFeature], Dict[str, BatchFeature], Dict[str, List[BatchFeature]], List[Dict[str, BatchFeature]], ] , __lowerCamelCase : Union[bool, str, PaddingStrategy] = True , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , ): """simple docstring""" # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(__lowerCamelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): _snake_case = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`''' f""" to this method that includes {self.model_input_names[0]}, but you provided""" f""" {list(processed_features.keys() )}""" ) _snake_case = processed_features[self.model_input_names[0]] _snake_case = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(__lowerCamelCase ) == 0: if return_attention_mask: _snake_case = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch _snake_case = required_input[0] if isinstance(__lowerCamelCase , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. _snake_case = 0 while len(required_input[index] ) == 0: index += 1 if index < len(__lowerCamelCase ): _snake_case = required_input[index][0] if return_tensors is None: if is_tf_tensor(__lowerCamelCase ): _snake_case = '''tf''' elif is_torch_tensor(__lowerCamelCase ): _snake_case = '''pt''' elif isinstance(__lowerCamelCase , (int, float, list, tuple, np.ndarray) ): _snake_case = '''np''' else: raise ValueError( f"""type of {first_element} unknown: {type(__lowerCamelCase )}. 
""" '''Should be one of a python, numpy, pytorch or tensorflow object.''' ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): _snake_case = to_numpy(__lowerCamelCase ) else: _snake_case = [to_numpy(__lowerCamelCase ) for v in value] # Convert padding_strategy in PaddingStrategy _snake_case = self._get_padding_strategies(padding=__lowerCamelCase , max_length=__lowerCamelCase ) _snake_case = processed_features[self.model_input_names[0]] _snake_case = len(__lowerCamelCase ) if not all(len(__lowerCamelCase ) == batch_size for v in processed_features.values() ): raise ValueError('''Some items in the output dictionary have a different batch size than others.''' ) _snake_case = [] for i in range(__lowerCamelCase ): _snake_case = {k: v[i] for k, v in processed_features.items()} # truncation _snake_case = self._truncate( __lowerCamelCase , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , truncation=__lowerCamelCase , ) truncated_inputs.append(__lowerCamelCase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length _snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) _snake_case = PaddingStrategy.MAX_LENGTH _snake_case = {} for i in range(__lowerCamelCase ): # padding _snake_case = self._pad( truncated_inputs[i] , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , ) for key, value in outputs.items(): if key not in batch_outputs: _snake_case = [] if value.dtype is np.dtype(np.floataa ): _snake_case = value.astype(np.floataa ) batch_outputs[key].append(__lowerCamelCase ) return BatchFeature(__lowerCamelCase , tensor_type=__lowerCamelCase ) def __UpperCAmelCase ( self : int , __lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): """simple docstring""" _snake_case = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: _snake_case = len(__lowerCamelCase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): _snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of _snake_case = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__lowerCamelCase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: _snake_case = np.ones(len(__lowerCamelCase ) , dtype=np.intaa ) if needs_to_be_padded: _snake_case = max_length - len(__lowerCamelCase ) if self.padding_side == "right": if return_attention_mask: _snake_case = np.pad( processed_features['''attention_mask'''] , (0, difference) ) _snake_case = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) _snake_case = np.pad( __lowerCamelCase , __lowerCamelCase , '''constant''' , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: _snake_case = np.pad( processed_features['''attention_mask'''] , (difference, 0) ) _snake_case = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) _snake_case = np.pad( __lowerCamelCase , __lowerCamelCase , '''constant''' , constant_values=self.padding_value ) else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) 
return processed_features def __UpperCAmelCase ( self : Any , __lowerCamelCase : Union[Dict[str, np.ndarray], BatchFeature] , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , ): """simple docstring""" if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' ) _snake_case = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): _snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of _snake_case = len(__lowerCamelCase ) > max_length if needs_to_be_truncated: _snake_case = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: _snake_case = processed_features['''attention_mask'''][:max_length] return processed_features def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : str=False , __lowerCamelCase : Any=None ): """simple docstring""" # Get padding strategy if padding is not False: if padding is True: _snake_case = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(__lowerCamelCase , __lowerCamelCase ): _snake_case = PaddingStrategy(__lowerCamelCase ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): _snake_case = padding else: _snake_case = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use''' ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' ) return padding_strategy
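The class above is transformers' SequenceFeatureExtractor, whose pad() aligns variable-length feature sequences. A hedged sketch of the inherited behavior through one concrete subclass (Wav2Vec2FeatureExtractor; exact defaults may vary by release):

```python
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(
    feature_size=1, sampling_rate=16000, padding_value=0.0, return_attention_mask=True
)
# pad() keys on the model's main input name ("input_values" here) and
# right-pads every sequence to the longest one in the batch.
batch = extractor.pad(
    {"input_values": [np.zeros(8, dtype=np.float32), np.zeros(5, dtype=np.float32)]},
    padding="longest",
    return_tensors="np",
)
print(batch["input_values"].shape)    # (2, 8)
print(batch["attention_mask"].shape)  # (2, 8)
```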
103
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''bert_for_seq_generation''': ( '''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model''' ), } } SCREAMING_SNAKE_CASE__ = {'''bert_for_seq_generation''': 512} class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : List[int] = [] __SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask'''] def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<::::>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ): '''simple docstring''' __a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , ) __a : int = vocab_file __a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return self.sp_model.get_piece_size() def __lowerCAmelCase ( self : int ): '''simple docstring''' __a : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ): '''simple docstring''' __a : Union[str, Any] = self.__dict__.copy() __a : Any = None return state def __setstate__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' __a : str = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __a : str = {} __a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' __a : int = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ ) return token def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' __a : Optional[Any] = [] __a : Optional[int] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token __a : Dict = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE__ ) out_string += 
self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) return out_string.strip() def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __a : Tuple = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi: __a : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
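This row implements the SentencePiece-backed tokenizer for BERT sequence generation. A usage sketch with the checkpoint named in the sample's own vocab map:

```python
from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
ids = tokenizer("sequence generation with BERT").input_ids
print(tokenizer.convert_ids_to_tokens(ids))
```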
47
0
"""simple docstring""" from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = TypeVar("""DatasetType""", Dataset, IterableDataset) def _lowerCamelCase ( UpperCAmelCase_ : List[DatasetType], UpperCAmelCase_ : Optional[List[float]] = None, UpperCAmelCase_ : Optional[int] = None, UpperCAmelCase_ : Optional[DatasetInfo] = None, UpperCAmelCase_ : Optional[NamedSplit] = None, UpperCAmelCase_ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted", ) -> DatasetType: """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError("Unable to interleave an empty list of datasets." ) for i, dataset in enumerate(UpperCAmelCase_ ): if not isinstance(UpperCAmelCase_, (Dataset, IterableDataset) ): if isinstance(UpperCAmelCase_, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." ) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCAmelCase_ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCAmelCase_ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCAmelCase_ ).__name__}.""" ) if i == 0: A__ , A__ = ( (Dataset, IterableDataset) if isinstance(UpperCAmelCase_, UpperCAmelCase_ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCAmelCase_, UpperCAmelCase_ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, info=UpperCAmelCase_, split=UpperCAmelCase_, stopping_strategy=UpperCAmelCase_ ) else: return _interleave_iterable_datasets( UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, info=UpperCAmelCase_, split=UpperCAmelCase_, stopping_strategy=UpperCAmelCase_ ) def _lowerCamelCase ( UpperCAmelCase_ : List[DatasetType], UpperCAmelCase_ : Optional[DatasetInfo] = None, UpperCAmelCase_ : Optional[NamedSplit] = None, UpperCAmelCase_ : int = 0, ) -> DatasetType: """simple docstring""" if not dsets: raise ValueError("Unable to concatenate an empty list of datasets." ) for i, dataset in enumerate(UpperCAmelCase_ ): if not isinstance(UpperCAmelCase_, (Dataset, IterableDataset) ): if isinstance(UpperCAmelCase_, (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ "is an empty dataset dictionary." 
) raise ValueError( F"""Dataset at position {i} has at least one split: {list(UpperCAmelCase_ )}\n""" F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(UpperCAmelCase_ ) )}']""" ) raise ValueError( F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(UpperCAmelCase_ ).__name__}.""" ) if i == 0: A__ , A__ = ( (Dataset, IterableDataset) if isinstance(UpperCAmelCase_, UpperCAmelCase_ ) else (IterableDataset, Dataset) ) elif not isinstance(UpperCAmelCase_, UpperCAmelCase_ ): raise ValueError( F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(UpperCAmelCase_, info=UpperCAmelCase_, split=UpperCAmelCase_, axis=UpperCAmelCase_ ) else: return _concatenate_iterable_datasets(UpperCAmelCase_, info=UpperCAmelCase_, split=UpperCAmelCase_, axis=UpperCAmelCase_ )
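The two functions above are datasets' interleave_datasets and concatenate_datasets with their shared type-checking preamble. A minimal sketch of the public entry points:

```python
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"text": ["a", "b"]})
d2 = Dataset.from_dict({"text": ["c", "d"]})

# Alternate examples across datasets; stop when the first one runs out.
mixed = interleave_datasets([d1, d2], seed=42, stopping_strategy="first_exhausted")
# Or stack them end to end.
joined = concatenate_datasets([d1, d2])
print(mixed["text"], joined["text"])
```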
104
from ..utils import DummyObject, requires_backends class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : List[Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Any = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Dict = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : int = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , 
*SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[int] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] )
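The style_context column here is diffusers' generated dummy-object module: placeholder classes that import cleanly but fail loudly on use when torch/transformers/onnx are missing. A stripped-down sketch of the pattern (the class and pipeline names below are illustrative, not the library's):

```python
def requires_backends(obj, backends):
    # Illustrative stand-in for the library helper of the same name.
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")


class DummyObject(type):
    # Metaclass: touching any missing class attribute raises instead of half-working.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)


class SomeOnnxPipeline(metaclass=DummyObject):  # hypothetical placeholder class
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)
```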
47
0
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowerCAmelCase_ ( nn.Module ): __a : int __a : int __a : float = 0.0 __a : int = 1 __a : int = 1 __a : bool = True __a : bool = False __a : bool = False __a : bool = False __a : jnp.dtype = jnp.floataa def snake_case ( self ): SCREAMING_SNAKE_CASE_ : str = [] SCREAMING_SNAKE_CASE_ : Dict = [] for i in range(self.num_layers ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.in_channels if i == 0 else self.out_channels SCREAMING_SNAKE_CASE_ : str = FlaxResnetBlockaD( in_channels=snake_case__ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(snake_case__ ) SCREAMING_SNAKE_CASE_ : List[Any] = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(snake_case__ ) SCREAMING_SNAKE_CASE_ : List[Any] = resnets SCREAMING_SNAKE_CASE_ : List[Any] = attentions if self.add_downsample: SCREAMING_SNAKE_CASE_ : List[str] = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=True ): SCREAMING_SNAKE_CASE_ : List[Any] = () for resnet, attn in zip(self.resnets ,self.attentions ): SCREAMING_SNAKE_CASE_ : Tuple = resnet(snake_case__ ,snake_case__ ,deterministic=snake_case__ ) SCREAMING_SNAKE_CASE_ : str = attn(snake_case__ ,snake_case__ ,deterministic=snake_case__ ) output_states += (hidden_states,) if self.add_downsample: SCREAMING_SNAKE_CASE_ : List[Any] = self.downsamplers_a(snake_case__ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCAmelCase_ ( nn.Module ): __a : int __a : int __a : float = 0.0 __a : int = 1 __a : bool = True __a : jnp.dtype = jnp.floataa def snake_case ( self ): SCREAMING_SNAKE_CASE_ : Optional[int] = [] for i in range(self.num_layers ): SCREAMING_SNAKE_CASE_ : List[str] = self.in_channels if i == 0 else self.out_channels SCREAMING_SNAKE_CASE_ : List[str] = FlaxResnetBlockaD( in_channels=snake_case__ ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(snake_case__ ) SCREAMING_SNAKE_CASE_ : Dict = resnets if self.add_downsample: SCREAMING_SNAKE_CASE_ : Any = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self ,snake_case__ ,snake_case__ ,snake_case__=True ): SCREAMING_SNAKE_CASE_ : List[Any] = () for resnet in self.resnets: SCREAMING_SNAKE_CASE_ : List[Any] = resnet(snake_case__ ,snake_case__ ,deterministic=snake_case__ ) output_states += (hidden_states,) if self.add_downsample: SCREAMING_SNAKE_CASE_ : List[str] = self.downsamplers_a(snake_case__ ) output_states += (hidden_states,) return hidden_states, output_states class lowerCAmelCase_ ( nn.Module ): __a : int __a : int __a : int __a : float = 0.0 __a : int = 1 __a : int = 1 __a : bool = True __a : bool = False __a : bool = False __a : bool = False __a : jnp.dtype = jnp.floataa def snake_case ( self ): SCREAMING_SNAKE_CASE_ : Any = [] SCREAMING_SNAKE_CASE_ : List[Any] = [] for i in range(self.num_layers ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels SCREAMING_SNAKE_CASE_ : 
Union[str, Any] = self.prev_output_channel if i == 0 else self.out_channels SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(snake_case__ ) SCREAMING_SNAKE_CASE_ : List[Any] = FlaxTransformeraDModel( in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(snake_case__ ) SCREAMING_SNAKE_CASE_ : str = resnets SCREAMING_SNAKE_CASE_ : Any = attentions if self.add_upsample: SCREAMING_SNAKE_CASE_ : List[str] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=True ): for resnet, attn in zip(self.resnets ,self.attentions ): # pop res hidden states SCREAMING_SNAKE_CASE_ : Union[str, Any] = res_hidden_states_tuple[-1] SCREAMING_SNAKE_CASE_ : Dict = res_hidden_states_tuple[:-1] SCREAMING_SNAKE_CASE_ : Any = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) SCREAMING_SNAKE_CASE_ : Optional[int] = resnet(snake_case__ ,snake_case__ ,deterministic=snake_case__ ) SCREAMING_SNAKE_CASE_ : Dict = attn(snake_case__ ,snake_case__ ,deterministic=snake_case__ ) if self.add_upsample: SCREAMING_SNAKE_CASE_ : Tuple = self.upsamplers_a(snake_case__ ) return hidden_states class lowerCAmelCase_ ( nn.Module ): __a : int __a : int __a : int __a : float = 0.0 __a : int = 1 __a : bool = True __a : jnp.dtype = jnp.floataa def snake_case ( self ): SCREAMING_SNAKE_CASE_ : int = [] for i in range(self.num_layers ): SCREAMING_SNAKE_CASE_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels SCREAMING_SNAKE_CASE_ : Any = self.prev_output_channel if i == 0 else self.out_channels SCREAMING_SNAKE_CASE_ : List[str] = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(snake_case__ ) SCREAMING_SNAKE_CASE_ : List[Any] = resnets if self.add_upsample: SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype ) def __call__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=True ): for resnet in self.resnets: # pop res hidden states SCREAMING_SNAKE_CASE_ : List[str] = res_hidden_states_tuple[-1] SCREAMING_SNAKE_CASE_ : str = res_hidden_states_tuple[:-1] SCREAMING_SNAKE_CASE_ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = resnet(snake_case__ ,snake_case__ ,deterministic=snake_case__ ) if self.add_upsample: SCREAMING_SNAKE_CASE_ : str = self.upsamplers_a(snake_case__ ) return hidden_states class lowerCAmelCase_ ( nn.Module ): __a : int __a : float = 0.0 __a : int = 1 __a : int = 1 __a : bool = False __a : bool = False __a : jnp.dtype = jnp.floataa def snake_case ( self ): # there is always at least one resnet SCREAMING_SNAKE_CASE_ : Any = [ FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) ] SCREAMING_SNAKE_CASE_ : Tuple = [] for _ in range(self.num_layers ): SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxTransformeraDModel( in_channels=self.in_channels ,n_heads=self.num_attention_heads 
,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,) attentions.append(snake_case__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxResnetBlockaD( in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,) resnets.append(snake_case__ ) SCREAMING_SNAKE_CASE_ : Tuple = resnets SCREAMING_SNAKE_CASE_ : Union[str, Any] = attentions def __call__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=True ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.resnets[0](snake_case__ ,snake_case__ ) for attn, resnet in zip(self.attentions ,self.resnets[1:] ): SCREAMING_SNAKE_CASE_ : Tuple = attn(snake_case__ ,snake_case__ ,deterministic=snake_case__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = resnet(snake_case__ ,snake_case__ ,deterministic=snake_case__ ) return hidden_states
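These are the Flax UNet down/mid/up blocks from diffusers. The one non-obvious mechanic, visible in the __call__ bodies above, is how up blocks consume skip connections: pop the matching residual and concatenate it on the channel axis (Flax works in NHWC, hence axis=-1). A tiny sketch grounded in that code:

```python
import jax.numpy as jnp

hidden_states = jnp.zeros((1, 8, 8, 320))               # current activations (NHWC)
res_hidden_states_tuple = (jnp.zeros((1, 8, 8, 320)),)  # skip connections, last first

# Pop the most recent skip activation and widen the channel dimension.
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
print(hidden_states.shape)  # (1, 8, 8, 640)
```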
105
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
47
0
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of k consecutive elements using a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # Slide the window one step: drop array[i], add array[i + k].
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
106
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''huggingface/informer-tourism-monthly''': ( '''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json''' ), # See all Informer models at https://huggingface.co/models?filter=informer } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : List[Any] = '''informer''' __SCREAMING_SNAKE_CASE : List[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str = "prob" , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ): '''simple docstring''' __a : Dict = prediction_length __a : Tuple = context_length or prediction_length __a : Tuple = distribution_output __a : Tuple = loss __a : str = input_size __a : Dict = num_time_features __a : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] __a : str = scaling __a : Tuple = num_dynamic_real_features __a : int = num_static_real_features __a : Dict = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) __a : Optional[Any] = cardinality else: __a : Optional[int] = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) __a : int = embedding_dimension else: __a : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] __a : int = num_parallel_samples # Transformer architecture configuration __a : str = input_size * len(self.lags_sequence ) + self._number_of_features __a : Optional[int] = d_model __a : Union[str, Any] = encoder_attention_heads __a : int = decoder_attention_heads __a : Any = encoder_ffn_dim 
__a : Union[str, Any] = decoder_ffn_dim __a : List[Any] = encoder_layers __a : Optional[int] = decoder_layers __a : int = dropout __a : Optional[Any] = attention_dropout __a : Dict = activation_dropout __a : Union[str, Any] = encoder_layerdrop __a : Optional[int] = decoder_layerdrop __a : List[str] = activation_function __a : str = init_std __a : Optional[int] = use_cache # Informer __a : Union[str, Any] = attention_type __a : str = sampling_factor __a : Dict = distil super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Any ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
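This row is transformers' InformerConfig. A configuration sketch using defaults visible in the sample itself (attention_type="prob" and sampling_factor=5 are the ProbSparse-attention knobs):

```python
from transformers import InformerConfig, InformerForPrediction

config = InformerConfig(
    prediction_length=24,   # context_length falls back to this when left unset
    context_length=48,
    input_size=1,
    attention_type="prob",  # Informer's ProbSparse attention
    sampling_factor=5,
)
model = InformerForPrediction(config)
print(config.context_length, config.attention_type)
```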
47
0
'''simple docstring''' import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def _SCREAMING_SNAKE_CASE ( __snake_case : Any ): if isinstance(__snake_case , collections.abc.Iterable ): return x return (x, x) @require_flax class lowercase_ : """simple docstring""" def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : List[Any] ) -> Dict: pass def __UpperCAmelCase ( self : Optional[int] ) -> Union[str, Any]: pass def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]: pass def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : np.ndarray, UpperCamelCase__ : np.ndarray, UpperCamelCase__ : float ) -> Union[str, Any]: _A = np.abs((a - b) ).max() self.assertLessEqual(UpperCamelCase__, UpperCamelCase__, f'Difference between torch and flax is {diff} (>= {tol}).' 
) def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : int, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Dict=None, **UpperCamelCase__ : Union[str, Any] ) -> List[str]: _A = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__, UpperCamelCase__ ) _A = FlaxVisionTextDualEncoderModel(UpperCamelCase__ ) _A = model(input_ids=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__ ) self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim) ) def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : Any, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[Any]=None, **UpperCamelCase__ : Any ) -> Any: _A , _A = self.get_vision_text_model(UpperCamelCase__, UpperCamelCase__ ) _A = {'vision_model': vision_model, 'text_model': text_model} _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ ) _A = model(input_ids=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__ ) self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim) ) def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[int], UpperCamelCase__ : int, UpperCamelCase__ : List[Any], UpperCamelCase__ : List[str]=None, **UpperCamelCase__ : Optional[int] ) -> Optional[Any]: _A , _A = self.get_vision_text_model(UpperCamelCase__, UpperCamelCase__ ) _A = {'vision_model': vision_model, 'text_model': text_model} _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ ) _A = model(input_ids=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__ ) _A = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ ) _A = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__ ) _A = model(input_ids=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__ ) _A = after_output[0] _A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(UpperCamelCase__, 1e-3 ) def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : Tuple, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Dict, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Optional[int]=None, **UpperCamelCase__ : Optional[Any] ) -> Tuple: _A , _A = self.get_vision_text_model(UpperCamelCase__, UpperCamelCase__ ) _A = {'vision_model': vision_model, 'text_model': text_model} _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**UpperCamelCase__ ) _A = model( input_ids=UpperCamelCase__, pixel_values=UpperCamelCase__, attention_mask=UpperCamelCase__, output_attentions=UpperCamelCase__ ) _A = output.vision_model_output.attentions self.assertEqual(len(UpperCamelCase__ ), vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _A = to_atuple(vision_model.config.image_size ) _A = to_atuple(vision_model.config.patch_size ) _A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _A = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) ) _A = output.text_model_output.attentions 
self.assertEqual(len(UpperCamelCase__ ), text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def __UpperCAmelCase ( self : Optional[int], UpperCamelCase__ : int, UpperCamelCase__ : Any, UpperCamelCase__ : int ) -> int: pt_model.to(UpperCamelCase__ ) pt_model.eval() # prepare inputs _A = inputs_dict _A = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): _A = pt_model(**UpperCamelCase__ ).to_tuple() _A = fx_model(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ), len(UpperCamelCase__ ), 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ): self.assert_almost_equals(UpperCamelCase__, pt_output.numpy(), 4e-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCamelCase__ ) _A = FlaxVisionTextDualEncoderModel.from_pretrained(UpperCamelCase__, from_pt=UpperCamelCase__ ) _A = fx_model_loaded(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ), len(UpperCamelCase__ ), 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ): self.assert_almost_equals(UpperCamelCase__, pt_output.numpy(), 4e-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCamelCase__ ) _A = VisionTextDualEncoderModel.from_pretrained(UpperCamelCase__, from_flax=UpperCamelCase__ ) pt_model_loaded.to(UpperCamelCase__ ) pt_model_loaded.eval() with torch.no_grad(): _A = pt_model_loaded(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ), len(UpperCamelCase__ ), 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ): self.assert_almost_equals(UpperCamelCase__, pt_output_loaded.numpy(), 4e-2 ) def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[int] ) -> List[str]: _A = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__, UpperCamelCase__ ) _A = VisionTextDualEncoderModel(UpperCamelCase__ ) _A = FlaxVisionTextDualEncoderModel(UpperCamelCase__ ) _A = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), UpperCamelCase__ ) _A = fx_state self.check_pt_flax_equivalence(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : Union[str, Any] ) -> List[str]: _A = VisionTextDualEncoderConfig.from_vision_text_configs(UpperCamelCase__, UpperCamelCase__ ) _A = VisionTextDualEncoderModel(UpperCamelCase__ ) _A = FlaxVisionTextDualEncoderModel(UpperCamelCase__ ) _A = load_flax_weights_in_pytorch_model(UpperCamelCase__, fx_model.params ) self.check_pt_flax_equivalence(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) def __UpperCAmelCase ( self : Optional[int] ) -> str: _A = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**UpperCamelCase__ ) def __UpperCAmelCase ( self : str ) -> Dict: _A = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**UpperCamelCase__ ) def __UpperCAmelCase ( self : Dict ) -> List[str]: _A = self.prepare_config_and_inputs() self.check_save_load(**UpperCamelCase__ ) def __UpperCAmelCase ( self : int ) -> int: _A = self.prepare_config_and_inputs() 
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast BlenderbotSmall tokenizer, backed by a byte-level BPE model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """BlenderbotSmall does not use token type ids, so this returns a list of zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
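A minimal usage sketch for the tokenizer above (the file paths and token ids are made-up placeholders, not values from the original file):

if __name__ == "__main__":
    # Assumes vocab.json / merges.txt exist locally; ids 5, 6, 7 are arbitrary.
    tok = BlenderbotSmallTokenizerFast(vocab_file="vocab.json", merges_file="merges.txt")
    print(tok.build_inputs_with_special_tokens([5, 6]))       # [bos, 5, 6, eos]
    print(tok.build_inputs_with_special_tokens([5, 6], [7]))  # [bos, 5, 6, eos, eos, 7, eos]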
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution (Inconsistent system)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
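A minimal usage sketch for the solver above; the example system and the __main__ guard are illustrative additions, not part of the original file:

if __name__ == "__main__":
    # x + 2y = 3 and 2x + y = 3 intersect at x = 1, y = 1
    print(cramers_rule_2x2([1, 2, 3], [2, 1, 3]))  # (1.0, 1.0)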
'''simple docstring''' import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class __a ( unittest.TestCase ): __UpperCamelCase : str = MODEL_FOR_MASKED_LM_MAPPING __UpperCamelCase : Dict = TF_MODEL_FOR_MASKED_LM_MAPPING def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,top_k=2 ,framework="""tf""" ) __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=6 ) ,[ {"""sequence""": """My name is grouped""", """score""": 2.1E-0_5, """token""": 3_8015, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-0_5, """token""": 2_5506, """token_str""": """ accuser"""}, ] ,) __SCREAMING_SNAKE_CASE = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=6 ) ,[ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-0_5, """token""": 3_8015, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-0_5, """token""": 2_5506, """token_str""": """ accuser""", }, ] ,) __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=6 ) ,[ {"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 1_3606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-0_5, """token""": 3499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-0_5, """token""": 2941, """token_str""": """ Te"""}, ] ,) @require_torch def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,top_k=2 ,framework="""pt""" ) __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=6 ) ,[ {"""sequence""": """My name is Maul""", """score""": 2.2E-0_5, """token""": 3_5676, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-0_5, """token""": 1_6416, """token_str""": """ELS"""}, ] ,) __SCREAMING_SNAKE_CASE = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=6 ) ,[ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-0_5, """token""": 3_5676, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-0_5, """token""": 1_6416, """token_str""": """ELS"""}, ] ,) __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=6 ) ,[ {"""sequence""": """My name is Patrick""", 
"""score""": 2.1E-0_5, """token""": 3499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 2E-0_5, """token""": 2941, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-0_5, """token""": 1_3606, """token_str""": """ Clara"""}, ] ,) __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask> <mask>""" ,top_k=2 ) self.assertEqual( nested_simplify(lowerCamelCase ,decimals=6 ) ,[ [ { """score""": 2.2E-0_5, """token""": 3_5676, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-0_5, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-0_5, """token""": 3_5676, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-0_5, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] ,) @require_torch_gpu def UpperCAmelCase__ ( self : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pipeline("""fill-mask""" ,model="""hf-internal-testing/tiny-random-distilbert""" ,device=0 ,framework="""pt""" ) # convert model to fp16 pipe.model.half() __SCREAMING_SNAKE_CASE = pipe("""Paris is the [MASK] of France.""" ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(lowerCamelCase ,lowerCamelCase ) @slow @require_torch def UpperCAmelCase__ ( self : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" ,model="""distilroberta-base""" ,top_k=2 ,framework="""pt""" ) self.run_large_test(lowerCamelCase ) @slow @require_tf def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" ,model="""distilroberta-base""" ,top_k=2 ,framework="""tf""" ) self.run_large_test(lowerCamelCase ) def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""" ) self.assertEqual( nested_simplify(lowerCamelCase ) ,[ {"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""}, ] ,) __SCREAMING_SNAKE_CASE = unmasker("""The largest city in France is <mask>""" ) self.assertEqual( nested_simplify(lowerCamelCase ) ,[ { """sequence""": """The largest city in France is Paris""", """score""": 0.251, """token""": 2201, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.214, """token""": 1_2790, """token_str""": """ Lyon""", }, ] ,) __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""" ,targets=[""" Patrick""", """ Clara""", """ Teven"""] ,top_k=3 ) self.assertEqual( nested_simplify(lowerCamelCase ) ,[ {"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""}, ] ,) @require_torch def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,framework="""pt""" ) 
__SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None self.run_pipeline_test(lowerCamelCase ,[] ) @require_tf def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" ,model="""sshleifer/tiny-distilroberta-base""" ,framework="""tf""" ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None self.run_pipeline_test(lowerCamelCase ,[] ) def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Tuple ,lowerCamelCase : List[Any] ): '''simple docstring''' if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" ) __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase ) __SCREAMING_SNAKE_CASE = [ f"""This is another {tokenizer.mask_token} test""", ] return fill_masker, examples def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : str ): '''simple docstring''' __SCREAMING_SNAKE_CASE = fill_masker.tokenizer __SCREAMING_SNAKE_CASE = fill_masker.model __SCREAMING_SNAKE_CASE = fill_masker( f"""This is a {tokenizer.mask_token}""" ,) self.assertEqual( lowerCamelCase ,[ {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, ] ,) __SCREAMING_SNAKE_CASE = fill_masker([f"""This is a {tokenizer.mask_token}"""] ) self.assertEqual( lowerCamelCase ,[ {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, ] ,) __SCREAMING_SNAKE_CASE = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] ) self.assertEqual( lowerCamelCase ,[ [ {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), 
"""token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, ], [ {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, ], ] ,) with self.assertRaises(lowerCamelCase ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(lowerCamelCase ): fill_masker("""This is""" ) self.run_test_top_k(lowerCamelCase ,lowerCamelCase ) self.run_test_targets(lowerCamelCase ,lowerCamelCase ) self.run_test_top_k_targets(lowerCamelCase ,lowerCamelCase ) self.fill_mask_with_duplicate_targets_and_top_k(lowerCamelCase ,lowerCamelCase ) self.fill_mask_with_multiple_masks(lowerCamelCase ,lowerCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Union[str, Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = tokenizer.get_vocab() __SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:2] # Pipeline argument __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase ,targets=lowerCamelCase ) __SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( lowerCamelCase ,[ {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, ] ,) __SCREAMING_SNAKE_CASE = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} ,set(lowerCamelCase ) ) # Call argument __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase ) __SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=lowerCamelCase ) self.assertEqual( lowerCamelCase ,[ {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, ] ,) __SCREAMING_SNAKE_CASE = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} ,set(lowerCamelCase ) ) # Score equivalence __SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=lowerCamelCase ) __SCREAMING_SNAKE_CASE = [top_mask["""token_str"""] for top_mask in 
outputs] __SCREAMING_SNAKE_CASE = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(lowerCamelCase ) == set(lowerCamelCase ): __SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=lowerCamelCase ) __SCREAMING_SNAKE_CASE = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(lowerCamelCase ) ,nested_simplify(lowerCamelCase ) ) # Raises with invalid with self.assertRaises(lowerCamelCase ): __SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(lowerCamelCase ): __SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets=[""""""] ) with self.assertRaises(lowerCamelCase ): __SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,targets="""""" ) def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase ,top_k=2 ) __SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ) self.assertEqual( lowerCamelCase ,[ {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, ] ,) __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase ) __SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,top_k=2 ) self.assertEqual( lowerCamelCase ,[ {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, ] ,) self.assertEqual(nested_simplify(lowerCamelCase ) ,nested_simplify(lowerCamelCase ) ) def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[Any] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = tokenizer.get_vocab() __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase ) # top_k=2, ntargets=3 __SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:3] __SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,top_k=2 ,targets=lowerCamelCase ) # If we use the most probably targets, and filter differently, we should still # have the same results __SCREAMING_SNAKE_CASE = [el["""token_str"""] for el in sorted(lowerCamelCase ,key=lambda lowerCamelCase : x["score"] ,reverse=lowerCamelCase )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(lowerCamelCase ).issubset(lowerCamelCase ): __SCREAMING_SNAKE_CASE = fill_masker(f"""This is a {tokenizer.mask_token}""" ,top_k=3 ,targets=lowerCamelCase ) # They should yield exactly the same result self.assertEqual(nested_simplify(lowerCamelCase ) ,nested_simplify(lowerCamelCase ) ) def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Dict ,lowerCamelCase : Any ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase ) __SCREAMING_SNAKE_CASE = tokenizer.get_vocab() # String duplicates + id duplicates __SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:3] __SCREAMING_SNAKE_CASE = [targets[0], targets[1], targets[0], targets[2], targets[1]] __SCREAMING_SNAKE_CASE = fill_masker(f"""My name is {tokenizer.mask_token}""" ,targets=lowerCamelCase ,top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(lowerCamelCase ) ,3 ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : List[str] ): '''simple docstring''' __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCamelCase ,tokenizer=lowerCamelCase ) __SCREAMING_SNAKE_CASE = fill_masker( f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" ,top_k=2 ) self.assertEqual( lowerCamelCase ,[ [ {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, ], [ {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, ], [ {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, {"""sequence""": ANY(lowerCamelCase ), """score""": ANY(lowerCamelCase ), """token""": ANY(lowerCamelCase ), """token_str""": ANY(lowerCamelCase )}, ], ] ,)
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder

    # NOTE: "continous" (sic) matches the module's actual filename upstream.
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
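For context, a hedged sketch of how a consumer relies on these guards; the checkpoint name is an assumption, and the imports only succeed with torch, transformers, and note_seq installed:

# from diffusers import SpectrogramDiffusionPipeline
# from diffusers.pipelines.spectrogram_diffusion import MidiProcessor
#
# pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
# processor = MidiProcessor()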
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a : def __init__( self , UpperCamelCase_ , UpperCamelCase_=3 , UpperCamelCase_=32 , UpperCamelCase_=3 , UpperCamelCase_=10 , UpperCamelCase_=[10, 20, 30, 40] , UpperCamelCase_=[1, 1, 2, 1] , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_="relu" , UpperCamelCase_=3 , UpperCamelCase_=None , ): UpperCAmelCase__ : List[Any] = parent UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : int = num_channels UpperCAmelCase__ : Any = embeddings_size UpperCAmelCase__ : List[Any] = hidden_sizes UpperCAmelCase__ : Dict = depths UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : Union[str, Any] = use_labels UpperCAmelCase__ : List[Any] = hidden_act UpperCAmelCase__ : Optional[int] = num_labels UpperCAmelCase__ : Optional[Any] = scope UpperCAmelCase__ : Optional[int] = len(UpperCamelCase_ ) def __snake_case ( self ): UpperCAmelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase__ : int = self.get_config() return config, pixel_values, labels def __snake_case ( self ): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : Optional[Any] = RegNetModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : Optional[int] = model(UpperCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : List[Any] = RegNetForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __snake_case ( self ): UpperCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = config_and_inputs UpperCAmelCase__ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class a ( lowercase , lowercase , unittest.TestCase ): UpperCamelCase : Tuple = (RegNetModel, RegNetForImageClassification) if is_torch_available() else () 
UpperCamelCase : List[Any] = ( {"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification} if is_torch_available() else {} ) UpperCamelCase : Dict = False UpperCamelCase : Optional[Any] = False UpperCamelCase : Dict = False UpperCamelCase : Optional[Any] = False def __snake_case ( self ): UpperCAmelCase__ : Union[str, Any] = RegNetModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ ) def __snake_case ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __snake_case ( self ): return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def __snake_case ( self ): pass @unittest.skip(reason='RegNet does not support input and output embeddings' ) def __snake_case ( self ): pass def __snake_case ( self ): UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Optional[int] = model_class(UpperCamelCase_ ) UpperCAmelCase__ : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : List[str] = [*signature.parameters.keys()] UpperCAmelCase__ : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def __snake_case ( self ): UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def __snake_case ( self ): UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class(config=UpperCamelCase_ ) for name, module in model.named_modules(): if isinstance(UpperCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def __snake_case ( self ): def check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): UpperCAmelCase__ : List[str] = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) UpperCAmelCase__ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase__ : List[Any] = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: 
UpperCAmelCase__ : str = layer_type UpperCAmelCase__ : int = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ : str = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def __snake_case ( self ): UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) @slow def __snake_case ( self ): for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Dict = RegNetModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def lowerCamelCase ( ): UpperCAmelCase__ : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class a ( unittest.TestCase ): @cached_property def __snake_case ( self ): return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __snake_case ( self ): UpperCAmelCase__ : Optional[Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(UpperCamelCase_ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : int = prepare_img() UpperCAmelCase__ : List[str] = image_processor(images=UpperCamelCase_ , return_tensors='pt' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**UpperCamelCase_ ) # verify the logits UpperCAmelCase__ : Dict = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) UpperCAmelCase__ : Optional[Any] = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
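For orientation, a short note on the pattern above: at runtime the module object is replaced by a _LazyModule, so heavy submodules are imported only when an attribute is first touched. A hedged sketch:

# import transformers.models.bridgetower as bridgetower  # cheap: no torch code loaded yet
# config = bridgetower.BridgeTowerConfig()               # first access triggers the real import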
"""simple docstring""" from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class UpperCAmelCase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> List[Any]: __lowercase : Any = path_or_paths __lowercase : List[Any] = split if split or isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else 'train' __lowercase : Any = features __lowercase : str = cache_dir __lowercase : List[str] = keep_in_memory __lowercase : Dict = streaming __lowercase : str = num_proc __lowercase : List[str] = kwargs @abstractmethod def _lowerCamelCase ( self ) -> Dict: pass class UpperCAmelCase_ ( __lowerCamelCase ): def __init__( self , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> List[Any]: __lowercase : Tuple = features __lowercase : Union[str, Any] = cache_dir __lowercase : Optional[Any] = keep_in_memory __lowercase : Tuple = streaming __lowercase : Tuple = num_proc __lowercase : Optional[int] = kwargs @abstractmethod def _lowerCamelCase ( self ) -> Union[str, Any]: pass
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """
    Capitalizes the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class _lowerCAmelCase : """simple docstring""" def __init__( self , _lowercase , _lowercase=1_3 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=9_9 , _lowercase=3_2 , _lowercase=5 , _lowercase=4 , _lowercase=3_7 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1_2_8 , _lowercase=3_2 , _lowercase=1_6 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ) -> Dict: '''simple docstring''' snake_case_ : Union[str, Any] = parent snake_case_ : str = batch_size snake_case_ : List[Any] = seq_length snake_case_ : List[Any] = is_training snake_case_ : str = use_input_mask snake_case_ : Union[str, Any] = use_token_type_ids snake_case_ : Union[str, Any] = use_labels snake_case_ : Dict = vocab_size snake_case_ : Tuple = hidden_size snake_case_ : List[str] = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : Union[str, Any] = intermediate_size snake_case_ : Tuple = hidden_act snake_case_ : int = hidden_dropout_prob snake_case_ : Dict = attention_probs_dropout_prob snake_case_ : Any = max_position_embeddings snake_case_ : Any = type_vocab_size snake_case_ : Dict = type_sequence_label_size snake_case_ : Optional[int] = initializer_range snake_case_ : Tuple = num_labels snake_case_ : Any = num_choices snake_case_ : Any = scope def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ : List[str] = None if self.use_input_mask: snake_case_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ : str = None if self.use_token_type_ids: snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ : Dict = None snake_case_ : Union[str, Any] = None snake_case_ : Union[str, Any] = None if self.use_labels: snake_case_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' ( snake_case_ ) : Tuple = self.prepare_config_and_inputs() snake_case_ : str = True snake_case_ : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int: '''simple docstring''' snake_case_ : Optional[Any] = NezhaModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() snake_case_ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) snake_case_ : str = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) snake_case_ : Any = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> List[str]: '''simple docstring''' snake_case_ : Optional[Any] = True snake_case_ : List[Any] = NezhaModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() snake_case_ : Optional[Any] = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , encoder_attention_mask=SCREAMING_SNAKE_CASE__ , ) snake_case_ : Any = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , ) snake_case_ : str = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int: '''simple docstring''' snake_case_ : Dict = NezhaForMaskedLM(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() snake_case_ : str = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> int: '''simple docstring''' snake_case_ : Tuple = NezhaForNextSentencePrediction(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() snake_case_ : Dict = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , 
labels=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> str: '''simple docstring''' snake_case_ : Union[str, Any] = NezhaForPreTraining(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() snake_case_ : List[str] = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , next_sentence_label=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]: '''simple docstring''' snake_case_ : Any = NezhaForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() snake_case_ : int = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict: '''simple docstring''' snake_case_ : int = self.num_labels snake_case_ : Any = NezhaForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() snake_case_ : Any = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> str: '''simple docstring''' snake_case_ : Any = self.num_labels snake_case_ : Tuple = NezhaForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() snake_case_ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = self.num_choices snake_case_ : str = NezhaForMultipleChoice(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() snake_case_ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ : str = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' 
snake_case_ : Tuple = self.prepare_config_and_inputs() ( snake_case_ ) : int = config_and_inputs snake_case_ : List[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) _lowerCamelCase = ( { '''feature-extraction''': NezhaModel, '''fill-mask''': NezhaForMaskedLM, '''question-answering''': NezhaForQuestionAnswering, '''text-classification''': NezhaForSequenceClassification, '''token-classification''': NezhaForTokenClassification, '''zero-shot''': NezhaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase = True def UpperCAmelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> List[Any]: '''simple docstring''' snake_case_ : List[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE__ ): snake_case_ : List[str] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' snake_case_ : int = NezhaModelTester(self ) snake_case_ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=3_7 ) def UpperCAmelCase__ ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' ( snake_case_ ) : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() snake_case_ : Dict = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self ) -> Union[str, Any]: '''simple 
docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) @slow def UpperCAmelCase__ ( self ) -> str: '''simple docstring''' for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Optional[Any] = NezhaModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @slow @require_torch_gpu def UpperCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return snake_case_ : Union[str, Any] = True snake_case_ : Tuple = model_class(config=SCREAMING_SNAKE_CASE__ ) snake_case_ : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ : Tuple = torch.jit.trace( SCREAMING_SNAKE_CASE__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , """bert.pt""" ) ) snake_case_ : str = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE__ , """bert.pt""" ) , map_location=SCREAMING_SNAKE_CASE__ ) loaded(inputs_dict["""input_ids"""].to(SCREAMING_SNAKE_CASE__ ) , inputs_dict["""attention_mask"""].to(SCREAMING_SNAKE_CASE__ ) ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self ) -> Tuple: '''simple docstring''' snake_case_ : List[str] = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" ) snake_case_ : Dict = torch.tensor([[0, 1, 2, 3, 4, 5]] ) snake_case_ : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): snake_case_ : List[str] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0] snake_case_ : Dict = torch.Size((1, 6, 7_6_8) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ ) snake_case_ : Union[str, Any] = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" ) snake_case_ : Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) snake_case_ : int = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): snake_case_ : Dict = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0] snake_case_ : int = torch.Size((1, 6, 2_1_1_2_8) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ ) snake_case_ : str = torch.tensor( [[-2.7939, 
-1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
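# The torchscript test in the block above traces the model on CPU, saves it with
# torch.jit.save, and reloads it onto another device. A minimal sketch of that
# round-trip with a stand-in nn.Module (illustration only, not the Nezha test itself):
import os
import tempfile

import torch


class TinyModel(torch.nn.Module):
    def forward(self, input_ids, attention_mask):
        # Stand-in for a transformer forward pass; any traceable computation works.
        return input_ids.float().sum() + attention_mask.float().sum()


model = TinyModel().eval()
example = (torch.ones(1, 6, dtype=torch.long), torch.ones(1, 6, dtype=torch.long))
traced = torch.jit.trace(model, example)

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")  # map_location relocates the graph
    loaded(*example)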
58
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = '''sew-d''' def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=2_5_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=("p2c", "c2p") , SCREAMING_SNAKE_CASE__ : str="layer_norm" , SCREAMING_SNAKE_CASE__ : Tuple="gelu_python" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-7 , SCREAMING_SNAKE_CASE__ : Any=1e-5 , SCREAMING_SNAKE_CASE__ : Optional[int]="group" , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : str=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=1_6 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="mean" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=2_5_6 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , **SCREAMING_SNAKE_CASE__ : Any , ): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ ) __a : Optional[int] = hidden_size __a : Optional[Any] = feat_extract_norm __a : List[str] = feat_extract_activation __a : Dict = list(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ ) __a : List[str] = list(SCREAMING_SNAKE_CASE__ ) __a : int = conv_bias __a : Tuple = num_conv_pos_embeddings __a : List[str] = num_conv_pos_embedding_groups __a : Optional[Any] = len(self.conv_dim ) __a : Union[str, Any] = num_hidden_layers __a : Optional[Any] = intermediate_size __a : Union[str, Any] = squeeze_factor __a : List[Any] = max_position_embeddings __a : Tuple = position_buckets __a : Optional[int] = share_att_key __a : List[str] = relative_attention __a : Any = norm_rel_ebd __a : Any = list(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = hidden_act __a : str = num_attention_heads __a : Union[str, Any] = hidden_dropout __a : Optional[int] = attention_dropout 
__a : List[str] = activation_dropout __a : int = feat_proj_dropout __a : int = final_dropout __a : Dict = layer_norm_eps __a : Tuple = feature_layer_norm_eps __a : str = initializer_range __a : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __a : Tuple = apply_spec_augment __a : Optional[Any] = mask_time_prob __a : Any = mask_time_length __a : List[str] = mask_time_min_masks __a : List[str] = mask_feature_prob __a : Tuple = mask_feature_length __a : Any = mask_feature_min_masks # ctc loss __a : Optional[int] = ctc_loss_reduction __a : List[Any] = ctc_zero_infinity # sequence classification __a : Dict = use_weighted_layer_sum __a : Optional[Any] = classifier_proj_size @property def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
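# The inputs_to_logits_ratio property at the end of the config above reduces the
# conv strides with operator.mul: the total downsampling factor of the feature
# encoder. A standalone sketch of that computation and of the length check the
# constructor enforces, using the default tuples from the signature:
import functools
import operator

conv_dim = (64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512)
conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
conv_kernel = (10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1)

if not (len(conv_dim) == len(conv_stride) == len(conv_kernel)):
    raise ValueError("conv_dim, conv_stride and conv_kernel must have equal lengths")

inputs_to_logits_ratio = functools.reduce(operator.mul, conv_stride, 1)
print(inputs_to_logits_ratio)  # 320: one output frame per 320 input samples (20 ms at 16 kHz)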
47
0
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Union[str, Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" for attribute in key.split("." ): __magic_name__ : Dict = getattr(lowerCamelCase_ , lowerCamelCase_ ) if weight_type is not None: __magic_name__ : int = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape else: __magic_name__ : Optional[Any] = hf_pointer.shape assert hf_shape == value.shape, ( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": __magic_name__ : Optional[int] = value elif weight_type == "weight_g": __magic_name__ : str = value elif weight_type == "weight_v": __magic_name__ : Optional[Any] = value elif weight_type == "bias": __magic_name__ : str = value else: __magic_name__ : int = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" __magic_name__ : str = [] __magic_name__ : int = fairseq_model.state_dict() __magic_name__ : Dict = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __magic_name__ : Tuple = False if "conv_layers" in name: load_conv_layer( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == "group" , ) __magic_name__ : List[Any] = True else: for key, mapped_key in MAPPING.items(): __magic_name__ : List[str] = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or (key.split("w2v_model." )[-1] == name.split("." )[0] and not is_finetuned): __magic_name__ : List[str] = True if "*" in mapped_key: __magic_name__ : Optional[Any] = name.split(lowerCamelCase_ )[0].split("." 
)[-2] __magic_name__ : Optional[int] = mapped_key.replace("*" , lowerCamelCase_ ) if "weight_g" in name: __magic_name__ : Optional[int] = 'weight_g' elif "weight_v" in name: __magic_name__ : List[str] = 'weight_v' elif "weight" in name: __magic_name__ : str = 'weight' elif "bias" in name: __magic_name__ : Dict = 'bias' else: __magic_name__ : Tuple = None set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) continue if not is_used: unused_weights.append(lowerCamelCase_ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" __magic_name__ : Union[str, Any] = full_name.split("conv_layers." )[-1] __magic_name__ : Union[str, Any] = name.split("." ) __magic_name__ : List[Any] = int(items[0] ) __magic_name__ : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __magic_name__ : List[str] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __magic_name__ : Tuple = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __magic_name__ : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __magic_name__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCamelCase_ ) @torch.no_grad() def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True ): """simple docstring""" if config_path is not None: __magic_name__ : Optional[Any] = HubertConfig.from_pretrained(lowerCamelCase_ ) else: __magic_name__ : str = HubertConfig() if is_finetuned: if dict_path: __magic_name__ : Dict = Dictionary.load(lowerCamelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __magic_name__ : List[Any] = target_dict.pad_index __magic_name__ : List[Any] = target_dict.bos_index __magic_name__ : Tuple = target_dict.eos_index __magic_name__ : List[str] = len(target_dict.symbols ) __magic_name__ : Union[str, Any] = os.path.join(lowerCamelCase_ , "vocab.json" ) if not os.path.isdir(lowerCamelCase_ ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCamelCase_ ) ) return os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) with open(lowerCamelCase_ , "w" , encoding="utf-8" ) as vocab_handle: json.dump(target_dict.indices , lowerCamelCase_ ) __magic_name__ : Optional[int] = WavaVecaCTCTokenizer( lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCamelCase_ , ) __magic_name__ : Dict = True if config.feat_extract_norm == 'layer' else False __magic_name__ : Tuple = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) __magic_name__ : List[Any] = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ ) processor.save_pretrained(lowerCamelCase_ ) __magic_name__ : Union[str, Any] = HubertForCTC(lowerCamelCase_ ) else: __magic_name__ : int = HubertModel(lowerCamelCase_ ) if is_finetuned: __magic_name__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: __magic_name__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __magic_name__ : Optional[Any] = model[0].eval() recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) hf_wavavec.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is 
a fine-tuned model or not" ) _SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
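# The set_recursively helper in the conversion script above walks a dotted key such
# as "encoder.layers.0.attention.k_proj.weight" down the module tree with getattr
# before assigning the fairseq tensor. A minimal sketch of that traversal
# (get_pointer is a hypothetical name, used here only for illustration):
import torch


def get_pointer(model, key):
    pointer = model
    for attribute in key.split("."):
        pointer = getattr(pointer, attribute)
    return pointer


model = torch.nn.Sequential(torch.nn.Linear(4, 4))
weight = get_pointer(model, "0.weight")  # same object as model[0].weight
weight.data = torch.zeros_like(weight.data)  # assign in place, as the converter does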
436
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ = TypeVar('''T''') def UpperCAmelCase__ ( lowerCamelCase_ : int ): return (position - 1) // 2 def UpperCAmelCase__ ( lowerCamelCase_ : int ): return (2 * position) + 1 def UpperCAmelCase__ ( lowerCamelCase_ : int ): return (2 * position) + 2 class _UpperCamelCase( Generic[T] ): def __init__( self : List[str] ): '''simple docstring''' __a : list[tuple[T, int]] = [] __a : dict[T, int] = {} __a : int = 0 def __len__( self : Any ): '''simple docstring''' return self.elements def __repr__( self : Any ): '''simple docstring''' return str(self.heap ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return self.elements == 0 def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' self.heap.append((elem, weight) ) __a : List[Any] = self.elements self.elements += 1 self._bubble_up(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __a , __a : Union[str, Any] = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __a , __a : Dict = self.heap[0] self._bubble_down(SCREAMING_SNAKE_CASE__ ) return elem def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : List[Any] = self.position_map[elem] __a : str = (elem, weight) if position > 0: __a : Tuple = get_parent_position(SCREAMING_SNAKE_CASE__ ) __a , __a : Dict = self.heap[parent_position] if parent_weight > weight: self._bubble_up(SCREAMING_SNAKE_CASE__ ) else: self._bubble_down(SCREAMING_SNAKE_CASE__ ) else: self._bubble_down(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T ): '''simple docstring''' __a : List[Any] = self.position_map[elem] if curr_pos == 0: return None __a : List[str] = get_parent_position(SCREAMING_SNAKE_CASE__ ) __a , __a : str = self.heap[curr_pos] __a , __a : Optional[int] = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_up(SCREAMING_SNAKE_CASE__ ) return None def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T ): '''simple docstring''' __a : int = self.position_map[elem] __a , __a : Optional[Any] = self.heap[curr_pos] __a : Tuple = get_child_left_position(SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = get_child_right_position(SCREAMING_SNAKE_CASE__ ) if child_left_position < self.elements and child_right_position < self.elements: __a , __a : str = self.heap[child_left_position] __a , __a : List[str] = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) if child_left_position < self.elements: __a , __a : Any = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) else: return None if child_right_position < self.elements: __a , __a : Union[str, Any] = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) return None def __lowerCAmelCase ( self : 
List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : Optional[Any] = self.heap[nodea_pos][0] __a : str = self.heap[nodea_pos][0] __a , __a : int = ( self.heap[nodea_pos], self.heap[nodea_pos], ) __a : str = nodea_pos __a : Optional[int] = nodea_pos class _UpperCamelCase( Generic[T] ): def __init__( self : List[Any] ): '''simple docstring''' __a : dict[T, dict[T, int]] = {} __a : int = 0 def __repr__( self : Tuple ): '''simple docstring''' return str(self.connections ) def __len__( self : Dict ): '''simple docstring''' return self.nodes def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ): '''simple docstring''' if node not in self.connections: __a : Tuple = {} self.nodes += 1 def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' self.add_node(SCREAMING_SNAKE_CASE__ ) self.add_node(SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = weight __a : Any = weight def UpperCAmelCase__ ( lowerCamelCase_ : GraphUndirectedWeighted[T] , ): __a : dict[T, int] = {node: maxsize for node in graph.connections} __a : dict[T, T | None] = {node: None for node in graph.connections} __a : MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(lowerCamelCase_ , lowerCamelCase_ ) if priority_queue.is_empty(): return dist, parent # initialization __a : Optional[int] = priority_queue.extract_min() __a : int = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __a : str = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowerCamelCase_ , dist[neighbour] ) __a : Optional[int] = node # running prim's algorithm while not priority_queue.is_empty(): __a : Any = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __a : Tuple = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowerCamelCase_ , dist[neighbour] ) __a : Dict = node return dist, parent
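# The MinPriorityQueue/Prim's implementation above maintains dist and parent maps
# with an explicit decrease-key heap. As an independent cross-check, here is a
# compact heapq-based Prim's over the same adjacency-dict shape (sketch only;
# it returns just the MST weight, not the parent map):
import heapq


def prim_mst_weight(connections, start):
    visited = {start}
    heap = [(weight, node) for node, weight in connections[start].items()]
    heapq.heapify(heap)
    total = 0
    while heap and len(visited) < len(connections):
        weight, node = heapq.heappop(heap)
        if node in visited:
            continue  # stale entry; a cheaper edge already reached this node
        visited.add(node)
        total += weight
        for neighbour, w in connections[node].items():
            if neighbour not in visited:
                heapq.heappush(heap, (w, neighbour))
    return total


graph = {"a": {"b": 3, "c": 15}, "b": {"a": 3, "c": 10}, "c": {"a": 15, "b": 10}}
print(prim_mst_weight(graph, "a"))  # 13: edges a-b (3) and b-c (10)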
47
0
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: _lowercase : str = None _lowercase : Dict = logging.get_logger(__name__) _lowercase : List[str] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} _lowercase : int = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json""" ), }, } _lowercase : Any = { """facebook/nllb-large-en-ro""": 1024, """facebook/nllb-200-distilled-600M""": 1024, } # fmt: off _lowercase : Any = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", 
"""slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class UpperCamelCase__( __lowerCamelCase ): __magic_name__ : Tuple = VOCAB_FILES_NAMES __magic_name__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP __magic_name__ : Optional[int] = ['''input_ids''', '''attention_mask'''] __magic_name__ : Union[str, Any] = NllbTokenizer __magic_name__ : List[int] = [] __magic_name__ : List[int] = [] def __init__( self : Union[str, Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Any="<s>" , lowerCAmelCase : Dict="</s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Optional[Any]="<s>" , lowerCAmelCase : List[Any]="<unk>" , lowerCAmelCase : Optional[Any]="<pad>" , lowerCAmelCase : Optional[int]="<mask>" , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[Any]=False , **lowerCAmelCase : Union[str, Any] , )-> List[Any]: """simple docstring""" UpperCAmelCase = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token UpperCAmelCase = legacy_behaviour super().__init__( vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) UpperCAmelCase = vocab_file UpperCAmelCase = False if not self.vocab_file else True UpperCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) UpperCAmelCase = { lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCAmelCase = src_lang if src_lang is not None else 'eng_Latn' UpperCAmelCase = self.convert_tokens_to_ids(self._src_lang ) UpperCAmelCase = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def a__( self : Tuple )-> Dict: """simple docstring""" return self._src_lang @src_lang.setter def a__( self : Union[str, Any] , lowerCAmelCase : str )-> Union[str, Any]: """simple docstring""" UpperCAmelCase = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a__( self : List[Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> Optional[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a__( self : List[Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None )-> Optional[int]: """simple docstring""" UpperCAmelCase = [self.sep_token_id] UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__( self : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] , lowerCAmelCase : Optional[str] , **lowerCAmelCase : Any )-> Union[str, Any]: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) UpperCAmelCase = src_lang UpperCAmelCase = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) UpperCAmelCase = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase = tgt_lang_id return inputs def a__( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : str = "eng_Latn" , lowerCAmelCase : Optional[List[str]] = None , lowerCAmelCase : str = "fra_Latn" , **lowerCAmelCase : Optional[int] , )-> Union[str, Any]: """simple docstring""" UpperCAmelCase = src_lang UpperCAmelCase = tgt_lang return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def a__( self : List[Any] )-> Dict: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def a__( self : Dict )-> Optional[Any]: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def a__( self : Optional[int] , lowerCAmelCase : int )-> int: """simple docstring""" UpperCAmelCase = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) if self.legacy_behaviour: UpperCAmelCase = [] UpperCAmelCase = [self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase = [self.cur_lang_code] UpperCAmelCase = [self.eos_token_id] UpperCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , 
self.prefix_tokens + self.suffix_tokens ) ) , ) def a__( self : Any , lowerCAmelCase : str )-> Optional[Any]: """simple docstring""" UpperCAmelCase = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) if self.legacy_behaviour: UpperCAmelCase = [] UpperCAmelCase = [self.eos_token_id, self.cur_lang_code] else: UpperCAmelCase = [self.cur_lang_code] UpperCAmelCase = [self.eos_token_id] UpperCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def a__( self : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None )-> Optional[Any]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" ) return UpperCAmelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
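# The fast NLLB tokenizer above rewrites its prefix/suffix special tokens whenever
# src_lang changes, and translation forces the first generated token to be the
# target language code. A typical usage sketch (downloads the checkpoint from the
# Hub; API as documented for transformers' NLLB models):
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn"
)
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"),
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])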
210
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
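# A quick brute-force cross-check for the segment tree above: every range query
# should equal reducing the slice directly (sketch; reuses the SegmentTree class
# defined above with operator.add):
import operator

data = [2, 1, 5, 3, 4]
tree = SegmentTree(data, operator.add)
for i in range(len(data)):
    for j in range(i, len(data)):
        assert tree.query_range(i, j) == sum(data[i : j + 1])
print("all range sums match the brute force")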
47
0
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
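# The test above boils down to: whatever accelerator.prepare returns must survive
# a pickle round-trip. The same check on a bare torch optimizer, as a
# dependency-free sketch:
import pickle

import torch

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), 0.1)
restored = pickle.loads(pickle.dumps(optimizer))
assert isinstance(restored, torch.optim.SGD)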
70
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int SCREAMING_SNAKE_CASE__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _UpperCamelCase( datasets.BuilderConfig ): __SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None def UpperCAmelCase__ ( lowerCamelCase_ : "pyspark.sql.DataFrame" , lowerCamelCase_ : List[int] , ): import pyspark def generate_fn(): __a : List[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: __a : Optional[int] = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' ) __a : Optional[Any] = partition_df.collect() __a : Union[str, Any] = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class _UpperCamelCase( _BaseExamplesIterable ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : Dict=None , ): '''simple docstring''' __a : List[str] = df __a : Tuple = partition_order or range(self.df.rdd.getNumPartitions() ) __a : List[Any] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Tuple ): '''simple docstring''' yield from self.generate_examples_fn() def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.random.Generator ): '''simple docstring''' __a : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(SCREAMING_SNAKE_CASE__ ) return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : Union[str, Any] = self.split_shard_indices_by_worker(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Dict ): '''simple docstring''' return len(self.partition_order ) class _UpperCamelCase( datasets.DatasetBuilder ): __SCREAMING_SNAKE_CASE : List[str] = SparkConfig def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : str = None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ): '''simple docstring''' import pyspark __a : int = pyspark.sql.SparkSession.builder.getOrCreate() __a : Optional[int] = df __a : List[Any] = working_dir super().__init__( cache_dir=SCREAMING_SNAKE_CASE__ , config_name=str(self.df.semanticHash() ) , **SCREAMING_SNAKE_CASE__ , ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' def create_cache_and_write_probe(SCREAMING_SNAKE_CASE__ : List[str] ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. 
os.makedirs(self._cache_dir , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : List[Any] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(SCREAMING_SNAKE_CASE__ , 'a' ) return [probe_file] if self._spark.conf.get('spark.master' , '' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: __a : List[Any] = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(SCREAMING_SNAKE_CASE__ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def __lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : datasets.download.download_manager.DownloadManager ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' import pyspark def get_arrow_batch_size(SCREAMING_SNAKE_CASE__ : int ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) __a : List[str] = self.df.count() __a : Dict = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. __a : List[str] = ( self.df.limit(SCREAMING_SNAKE_CASE__ ) .repartition(1 ) .mapInArrow(SCREAMING_SNAKE_CASE__ , 'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) __a : Dict = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. __a : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ , int(approx_total_size / max_shard_size ) ) __a : int = self.df.repartition(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , ): '''simple docstring''' import pyspark __a : Any = ParquetWriter if file_format == 'parquet' else ArrowWriter __a : Union[str, Any] = os.path.join(self._working_dir , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) if self._working_dir else fpath __a : Optional[int] = file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. __a : List[str] = self.config.features __a : int = self._writer_batch_size __a : Union[str, Any] = self._fs.storage_options def write_arrow(SCREAMING_SNAKE_CASE__ : Optional[int] ): # Within the same SparkContext, no two task attempts will share the same attempt ID. __a : Any = pyspark.TaskContext().taskAttemptId() __a : str = next(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , ) __a : Any = 0 __a : List[str] = writer_class( features=SCREAMING_SNAKE_CASE__ , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , ) __a : Optional[Any] = pa.Table.from_batches([first_batch] ) writer.write_table(SCREAMING_SNAKE_CASE__ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: __a , __a : Optional[int] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) shard_id += 1 __a : Optional[Any] = writer_class( features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , ) __a : Union[str, Any] = pa.Table.from_batches([batch] ) writer.write_table(SCREAMING_SNAKE_CASE__ ) if writer._num_bytes > 0: __a , __a : str = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(SCREAMING_SNAKE_CASE__ ) ): __a : Any = os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE__ ) , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) shutil.move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : Dict = ( self.df.mapInArrow(SCREAMING_SNAKE_CASE__ , 'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , SCREAMING_SNAKE_CASE__ : str = "arrow" , SCREAMING_SNAKE_CASE__ : Optional[Union[str, int]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ): '''simple docstring''' self._validate_cache_dir() __a : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = not is_remote_filesystem(self._fs ) __a : Optional[Any] = os.path.join if is_local else posixpath.join __a : Any = '-TTTTT-SSSSS-of-NNNNN' __a : Union[str, Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' __a : Any = path_join(self._output_dir , SCREAMING_SNAKE_CASE__ ) __a : Any = 0 __a : Dict = 0 __a : int = 0 __a : List[str] = [] __a : Optional[int] = [] for task_id, content in self._prepare_split_single(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[int] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(SCREAMING_SNAKE_CASE__ ) __a : 
List[str] = total_num_examples __a : Optional[int] = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: __a : Any = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. __a : Dict = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , ): rename( SCREAMING_SNAKE_CASE__ , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , ) __a : Union[str, Any] = [] __a : List[str] = 0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): __a , __a : Union[str, Any] = task_id_and_num_shards[i] for shard_id in range(SCREAMING_SNAKE_CASE__ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ).map(lambda SCREAMING_SNAKE_CASE__ : _rename_shard(*SCREAMING_SNAKE_CASE__ ) ).collect() else: # don't use any pattern __a : List[Any] = 0 __a : Any = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SCREAMING_SNAKE_CASE__ , '' ) , ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , ): '''simple docstring''' return SparkExamplesIterable(self.df )
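# _repartition_df_if_needed above estimates bytes-per-row from a <=100-row Arrow
# sample, then repartitions so each partition lands under max_shard_size. The
# arithmetic, as a plain-Python sketch with made-up sample numbers:
from datasets.config import MAX_SHARD_SIZE
from datasets.utils.py_utils import convert_file_size_to_int

max_shard_size = convert_file_size_to_int(MAX_SHARD_SIZE)  # "500MB" -> 500_000_000
df_num_rows = 2_000_000
approx_bytes_per_row = 1_024  # pretend the sampled Arrow batches averaged 1 KiB/row

approx_total_size = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
    # at least one row per partition, as in the builder
    new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
    print(new_num_partitions)  # 4 partitions of roughly 500MB each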
47
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) __SCREAMING_SNAKE_CASE = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
553
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : int ): # save results if os.path.exists(lowerCamelCase_ ): if os.path.exists(os.path.join(lowerCamelCase_ , 'config.json' ) ) and os.path.isfile( os.path.join(lowerCamelCase_ , 'config.json' ) ): os.remove(os.path.join(lowerCamelCase_ , 'config.json' ) ) if os.path.exists(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ): os.remove(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ) else: os.makedirs(lowerCamelCase_ ) model.save_pretrained(lowerCamelCase_ ) def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Any=False ): __a : Dict = 2 if unlogit: __a : Optional[Any] = torch.pow(lowerCamelCase_ , lowerCamelCase_ ) __a : Any = p * torch.log(lowerCamelCase_ ) __a : Union[str, Any] = 0 return -plogp.sum(dim=-1 ) def UpperCAmelCase__ ( lowerCamelCase_ : Any ): logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(lowerCamelCase_ ) ) ) ) for row in range(len(lowerCamelCase_ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : int=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : List[Any]=False ): __a , __a : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads __a : str = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) __a : int = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) if head_mask is None: __a : Union[str, Any] = torch.ones(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCamelCase_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: __a : Any = None __a : Optional[int] = 0.0 __a : Optional[Any] = 0.0 for step, inputs in enumerate(tqdm(lowerCamelCase_ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): __a : Dict = tuple(t.to(args.device ) for t in inputs ) ((__a) , ) : Dict = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) __a : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ , head_mask=lowerCamelCase_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) __a , __a , __a : int = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCamelCase_ ): __a : List[str] = entropy(attn.detach() , lowerCamelCase_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCamelCase_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= 
tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: __a : Optional[Any] = 2 __a : Union[str, Any] = torch.pow(torch.pow(lowerCamelCase_ , lowerCamelCase_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: __a : List[str] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(lowerCamelCase_ ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(lowerCamelCase_ ) logger.info('Head ranked by importance scores' ) __a : Optional[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) __a : str = torch.arange( head_importance.numel() , device=args.device ) __a : Tuple = head_ranks.view_as(lowerCamelCase_ ) print_ad_tensor(lowerCamelCase_ ) return attn_entropy, head_importance, total_loss def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ): __a , __a , __a : Optional[int] = compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ ) __a : Tuple = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , lowerCamelCase_ , original_score * args.masking_threshold ) __a : Tuple = torch.ones_like(lowerCamelCase_ ) __a : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) __a : Tuple = original_score while current_score >= original_score * args.masking_threshold: __a : Optional[Any] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads __a : List[str] = float('Inf' ) __a : List[Any] = head_importance.view(-1 ).sort()[1] if len(lowerCamelCase_ ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads __a : Any = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) __a : int = new_head_mask.view(-1 ) __a : Tuple = 0.0 __a : int = new_head_mask.view_as(lowerCamelCase_ ) __a : Optional[int] = new_head_mask.clone().detach() print_ad_tensor(lowerCamelCase_ ) # Compute metric and head importance again __a , __a , __a : int = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , head_mask=lowerCamelCase_ ) __a : List[Any] = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , lowerCamelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , ) logger.info('Final head mask' ) print_ad_tensor(lowerCamelCase_ ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ): __a : List[Any] = datetime.now() __a , __a , __a : List[str] = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ ) __a : List[str] = 1 / loss __a : List[Any] = datetime.now() - before_time __a : List[str] = sum(p.numel() for p in model.parameters() ) __a : Dict = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCamelCase_ ) ) } for k, v in heads_to_prune.items(): 
if isinstance(lowerCamelCase_ , lowerCamelCase_ ): __a : Tuple = [ v, ] assert sum(len(lowerCamelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowerCamelCase_ ) __a : Optional[Any] = sum(p.numel() for p in model.parameters() ) __a : Tuple = datetime.now() __a , __a , __a : Tuple = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ , actually_pruned=lowerCamelCase_ , ) __a : Optional[Any] = 1 / loss __a : List[Any] = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , lowerCamelCase_ , lowerCamelCase_ , pruned_num_params / original_num_params * 1_0_0 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , lowerCamelCase_ , lowerCamelCase_ ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 ) save_model(lowerCamelCase_ , args.output_dir ) def UpperCAmelCase__ ( ): __a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=lowerCamelCase_ , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=lowerCamelCase_ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=lowerCamelCase_ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=lowerCamelCase_ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=lowerCamelCase_ , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=lowerCamelCase_ , help='Metric to use for head masking.' 
) parser.add_argument( '--max_seq_length' , default=1_2_8 , type=lowerCamelCase_ , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=lowerCamelCase_ , help='Batch size.' ) parser.add_argument('--seed' , type=lowerCamelCase_ , default=4_2 ) parser.add_argument('--local_rank' , type=lowerCamelCase_ , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' ) __a : Optional[Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase_ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: __a : List[str] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) __a : Tuple = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) __a : Union[str, Any] = torch.device('cuda' , args.local_rank ) __a : Any = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) __a : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: __a : List[Any] = nn.parallel.DistributedDataParallel( lowerCamelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCamelCase_ ) elif args.n_gpu > 1: __a : Union[str, Any] = nn.DataParallel(lowerCamelCase_ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=lowerCamelCase_ ) torch.save(lowerCamelCase_ , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , lowerCamelCase_ ) # Prepare dataset __a : Tuple = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) __a : str = (torch.from_numpy(lowerCamelCase_ ),) __a : List[str] = TensorDataset(*lowerCamelCase_ ) __a : Optional[Any] = RandomSampler(lowerCamelCase_ ) __a : Union[str, Any] = DataLoader(lowerCamelCase_ , sampler=lowerCamelCase_ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: __a : Union[str, Any] = mask_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) prune_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
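# For orientation, a minimal sketch of what the per-row entropy helper in the script
# above computes (the obfuscated name corresponds to an `entropy(p)` function): on a
# uniform attention distribution over four positions it returns ln(4). This is a toy
# illustration, not part of the script itself.
import torch

p = torch.full((2, 4), 0.25)  # toy attention rows, uniform over 4 positions
plogp = p * torch.log(p)
print(-plogp.sum(dim=-1))  # tensor([1.3863, 1.3863]) == ln(4)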
"""Lambert's ellipsoidal distance between two points on Earth."""
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Flattening of the reference ellipsoid.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Central angle between the two points, from the haversine distance:
    # sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value:
    # X = (sigma - sin(sigma)) * sin^2(P)cos^2(Q) / cos^2(sigma / 2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value:
    # Y = (sigma + sin(sigma)) * cos^2(P)sin^2(Q) / sin^2(sigma / 2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
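# Quick usage sketch for the function above (the coordinates are illustrative round
# numbers, not authoritative): San Francisco -> New York, result in metres.
SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)
print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *NEW_YORK))  # roughly 4.14e6 m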
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('''0.8.3'''): raise Exception('''requires gluonnlp == 0.8.3''') if version.parse(mx.__version__) != version.parse('''1.5.0'''): raise Exception('''requires mxnet == 1.5.0''') logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = '''The Nymphenburg Palace is a beautiful palace in Munich!''' def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : str ): __a : List[Any] = { 'attention_cell': 'multi_head', 'num_layers': 4, 'units': 1_0_2_4, 'hidden_size': 7_6_8, 'max_length': 5_1_2, 'num_heads': 8, 'scaled': True, 'dropout': 0.1, 'use_residual': True, 'embed_size': 1_0_2_4, 'embed_dropout': 0.1, 'word_embed': None, 'layer_norm_eps': 1e-5, 'token_type_vocab_size': 2, } __a : Optional[int] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __a : List[str] = BERTEncoder( attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowerCamelCase_ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __a : int = 'openwebtext_ccnews_stories_books_cased' # Specify download folder to Gluonnlp's vocab __a : Optional[Any] = os.path.join(get_home_dir() , 'models' ) __a : Optional[Any] = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ ) __a : Any = nlp.model.BERTModel( lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , ) original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ ) __a : Dict = original_bort._collect_params_with_prefix() # Build our config 🤗 __a : Optional[Any] = { 'architectures': ['BertForMaskedLM'], 'attention_probs_dropout_prob': predefined_args['dropout'], 'hidden_act': 'gelu', 'hidden_dropout_prob': predefined_args['dropout'], 'hidden_size': predefined_args['embed_size'], 'initializer_range': 0.02, 'intermediate_size': predefined_args['hidden_size'], 'layer_norm_eps': predefined_args['layer_norm_eps'], 
'max_position_embeddings': predefined_args['max_length'], 'model_type': 'bort', 'num_attention_heads': predefined_args['num_heads'], 'num_hidden_layers': predefined_args['num_layers'], 'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa 'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa 'vocab_size': len(lowerCamelCase_ ), } __a : str = BertConfig.from_dict(lowerCamelCase_ ) __a : Optional[int] = BertForMaskedLM(lowerCamelCase_ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase_ : Optional[Any] ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] ): __a : Optional[int] = hf_param.shape __a : int = to_torch(params[gluon_param] ) __a : int = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param __a : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' ) __a : str = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' ) __a : Tuple = check_and_map_params( 
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' ) __a : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __a : Union[str, Any] = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __a : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention __a : BertSelfAttention = layer.attention.self __a : Optional[int] = check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) __a : str = check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) __a : List[str] = check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) __a : str = check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) __a : Dict = check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) __a : str = check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output __a : BertSelfOutput = layer.attention.output __a : Tuple = check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) __a : Dict = check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) __a : Optional[Any] = check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) __a : Optional[Any] = check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate __a : BertIntermediate = layer.intermediate __a : List[str] = check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) __a : Optional[Any] = check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output __a : BertOutput = layer.output __a : str = check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) __a : List[Any] = check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) __a : str = check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) __a : List[str] = check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models __a : Union[str, Any] = RobertaTokenizer.from_pretrained('roberta-base' ) __a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ )['input_ids'] # Get gluon output __a : Optional[int] = mx.nd.array([input_ids] ) __a : Tuple = original_bort(inputs=lowerCamelCase_ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase_ ) __a : Optional[Any] = BertModel.from_pretrained(lowerCamelCase_ ) hf_bort_model.eval() __a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ , return_tensors='pt' ) __a : int = hf_bort_model(**lowerCamelCase_ )[0] __a : Dict = output_gluon[0].asnumpy() __a : str = output_hf[0].detach().numpy() __a : List[Any] 
= np.max(np.abs(hf_layer - gluon_layer ) ).item() __a : str = np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) if success: print('✔️ Both model do output the same tensors' ) else: print('❌ Both model do **NOT** output the same tensors' ) print('Absolute difference is:' , lowerCamelCase_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
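# Typical invocation of the converter, assuming the script were saved as
# convert_bort_checkpoint.py (the filename and both paths are placeholders):
#
#   python convert_bort_checkpoint.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch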
"""Prim's algorithm over an undirected weighted graph, using a min priority
queue with decrease-key support via a position map."""
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Move the root to the end, pop it, then restore the heap property.
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
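# Usage sketch for the structures above; the exact parent map can vary with heap
# tie-breaking, so the printed value is one valid outcome.
g = GraphUndirectedWeighted[int]()
g.add_edge(1, 2, 1)
g.add_edge(2, 3, 2)
g.add_edge(1, 3, 4)
g.add_edge(3, 4, 1)
dist, parent = prims_algo(g)
print(parent)  # e.g. {1: None, 2: 1, 3: 2, 4: 3}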
def apply_table(inp, table):
    """Select bits of ``inp`` by the 1-based positions listed in ``table``."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate the bit string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(b)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box ``s``: outer bits pick the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741  pad S-box output to 2 bits
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
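# To make the permutation convention concrete: tables index the input 1-based,
# so [2, 4, 3, 1] maps "abcd" -> "bdca".
print(apply_table("abcd", [2, 4, 3, 1]))  # bdca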
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() lowerCAmelCase : str = logging.get_logger("""transformers.models.speecht5""") def _A ( A ,A ,A ) -> int: hf_model.apply_weight_norm() lowercase : int = checkpoint['input_conv.weight_g'] lowercase : Optional[Any] = checkpoint['input_conv.weight_v'] lowercase : str = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): lowercase : Tuple = checkpoint[F'''upsamples.{i}.1.weight_g'''] lowercase : Tuple = checkpoint[F'''upsamples.{i}.1.weight_v'''] lowercase : Optional[Any] = checkpoint[F'''upsamples.{i}.1.bias'''] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): lowercase : Optional[Any] = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g'''] lowercase : Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v'''] lowercase : int = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias'''] lowercase : List[str] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g'''] lowercase : str = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v'''] lowercase : Optional[Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias'''] lowercase : List[str] = checkpoint['output_conv.1.weight_g'] lowercase : int = checkpoint['output_conv.1.weight_v'] lowercase : List[str] = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def _A ( A ,A ,A ,A=None ,A=None ,) -> str: if config_path is not None: lowercase : List[Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase_ ) else: lowercase : Optional[Any] = SpeechTaHifiGanConfig() lowercase : Tuple = SpeechTaHifiGan(lowerCamelCase_ ) lowercase : Optional[int] = torch.load(lowerCamelCase_ ) load_weights(orig_checkpoint["model"]["generator"] ,lowerCamelCase_ ,lowerCamelCase_ ) lowercase : List[Any] = np.load(lowerCamelCase_ ) lowercase : Optional[Any] = stats[0].reshape(-1 ) lowercase : str = stats[1].reshape(-1 ) lowercase : Union[str, Any] = torch.from_numpy(lowerCamelCase_ ).float() lowercase : Optional[Any] = torch.from_numpy(lowerCamelCase_ ).float() model.save_pretrained(lowerCamelCase_ ) if repo_id: print("Pushing to the hub..." ) model.push_to_hub(lowerCamelCase_ ) if __name__ == "__main__": lowerCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) lowerCAmelCase : List[str] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
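# Typical invocation, assuming the script were saved as convert_hifigan.py
# (the filename and all paths are placeholders):
#
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5-hifigan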
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class _UpperCamelCase( unittest.TestCase ): def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Any ): '''simple docstring''' __a : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(SCREAMING_SNAKE_CASE__ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' __a : int = None ops.enable_eager_execution_internal() __a : Optional[Any] = tf.config.list_physical_devices('CPU' ) if len(SCREAMING_SNAKE_CASE__ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) __a : int = tf.config.list_logical_devices(device_type='CPU' ) __a : str = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): __a : List[str] = GradientAccumulator() __a : Tuple = tf.Variable([4.0, 3.0] ) __a , __a : int = create_optimizer(5e-5 , 1_0 , 5 ) __a : List[Any] = tf.Variable([0.0, 0.0] , trainable=SCREAMING_SNAKE_CASE__ ) def accumulate_on_replica(SCREAMING_SNAKE_CASE__ : Optional[Any] ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ): with strategy.scope(): __a : Optional[Any] = strategy.experimental_local_results(SCREAMING_SNAKE_CASE__ ) local_variables[0].assign(SCREAMING_SNAKE_CASE__ ) local_variables[1].assign(SCREAMING_SNAKE_CASE__ ) strategy.run(SCREAMING_SNAKE_CASE__ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(SCREAMING_SNAKE_CASE__ ) def _check_local_values(SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ): __a : Union[str, Any] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() 
self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
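# Minimal sketch of the accumulate-then-apply pattern the test above exercises,
# assuming the same GradientAccumulator API (call it with a list of gradients,
# read .gradients, then .reset()):
import tensorflow as tf
from transformers import GradientAccumulator

accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0])])
accumulator([tf.constant([3.0, 4.0])])
print(accumulator.step)          # 2
print(accumulator.gradients[0])  # tf.Tensor([4. 6.], ...)
accumulator.reset()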
"""simple docstring""" from ..utils import DummyObject, requires_backends class _SCREAMING_SNAKE_CASE ( metaclass=__lowerCamelCase ): """simple docstring""" _a : Optional[Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _SCREAMING_SNAKE_CASE ( metaclass=__lowerCamelCase ): """simple docstring""" _a : List[Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Any: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _SCREAMING_SNAKE_CASE ( metaclass=__lowerCamelCase ): """simple docstring""" _a : Any = ['''torch''', '''transformers''', '''onnx'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Union[str, Any]: requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _SCREAMING_SNAKE_CASE ( metaclass=__lowerCamelCase ): """simple docstring""" _a : Dict = ['''torch''', '''transformers''', '''onnx'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> str: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Tuple: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _SCREAMING_SNAKE_CASE ( metaclass=__lowerCamelCase ): """simple docstring""" _a : int = ['''torch''', '''transformers''', '''onnx'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[Any]: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> Optional[int]: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) class _SCREAMING_SNAKE_CASE ( metaclass=__lowerCamelCase ): """simple docstring""" _a : Optional[int] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> int: requires_backends(self , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls 
, *lowerCamelCase__ , **lowerCamelCase__ ) -> Dict: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] ) @classmethod def UpperCAmelCase__( cls , *lowerCamelCase__ , **lowerCamelCase__ ) -> List[str]: requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''', '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''', } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = '''roberta''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_0_2_6_5 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=1e-12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Tuple="absolute" , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Any , ): '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = vocab_size __a : Tuple = hidden_size __a : List[str] = num_hidden_layers __a : List[Any] = num_attention_heads __a : str = hidden_act __a : Optional[Any] = intermediate_size __a : Dict = hidden_dropout_prob __a : List[str] = attention_probs_dropout_prob __a : Optional[Any] = max_position_embeddings __a : Dict = type_vocab_size __a : str = initializer_range __a : List[str] = layer_norm_eps __a : Optional[int] = position_embedding_type __a : Union[str, Any] = use_cache __a : str = classifier_dropout class _UpperCamelCase( __lowerCamelCase ): @property def __lowerCAmelCase ( self : Any ): '''simple docstring''' if self.task == "multiple-choice": __a : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __a : Dict = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
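# Usage sketch, assuming the obfuscated config class above is RobertaConfig:
# instantiating with defaults and inspecting a couple of fields.
from transformers import RobertaConfig

config = RobertaConfig()
print(config.model_type)  # "roberta"
print(config.vocab_size)  # 50265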
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path __magic_name__ = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(4_2) __magic_name__ = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} __magic_name__ = '''zero2''' __magic_name__ = '''zero3''' __magic_name__ = [ZEROa, ZEROa] def __magic_name__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Optional[int] = parameterized.to_safe_name("_".join(str(lowerCamelCase_) for x in param.args)) return F"""{func.__name__}_{param_based_name}""" # Cartesian-product of zero stages with models to test __magic_name__ = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self , a_ , a_ ): self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) @require_torch_multi_gpu @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self , a_ , a_ ): self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self , a_ , a_ ): self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) @require_torch_multi_gpu @parameterized.expand(SCREAMING_SNAKE_CASE__ , name_func=SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self , a_ , a_ ): self.run_and_check( stage=SCREAMING_SNAKE_CASE__ , model=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) def _UpperCamelCase ( self , a_ ): pass def _UpperCamelCase ( self , a_ , a_ , a_ = 10 , a_ = True , a_ = True , a_ = True , ): lowerCamelCase_ : Dict = models[model] lowerCamelCase_ : str = self.run_trainer( stage=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , eval_steps=SCREAMING_SNAKE_CASE__ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE__ , fpaa=SCREAMING_SNAKE_CASE__ , ) self.do_checks(SCREAMING_SNAKE_CASE__ ) return output_dir def _UpperCamelCase ( self , a_ , a_ , a_ = 10 , a_ = 1 , a_ = True , a_ = True , ): lowerCamelCase_ : Union[str, Any] = self.get_auto_remove_tmp_dir("./xxx" , after=SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ : int = F""" --model_name_or_path {model_name} --dataset_name 
hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(SCREAMING_SNAKE_CASE__ )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fpaa: args.extend(["--fp16"] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files lowerCamelCase_ : Tuple = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split() lowerCamelCase_ : Any = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""] lowerCamelCase_ : str = self.get_launcher(SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ : int = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() ) return output_dir def _UpperCamelCase ( self , a_=False ): lowerCamelCase_ : Optional[int] = min(2 , get_gpu_count() ) if distributed else 1 return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = '''▁''' SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.bpe.model'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } SCREAMING_SNAKE_CASE__ = { '''facebook/xglm-564M''': 2048, } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ): '''simple docstring''' __a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer __a : Any = 7 __a : Union[str, Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )] __a : Union[str, Any] = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , ) __a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) ) __a : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __a : Any = 1 # Mimic fairseq token-to-id alignment for the first 4 token __a : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} __a : List[str] = len(self.sp_model ) __a : Optional[int] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ ) __a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : List[str] ): '''simple docstring''' __a : Tuple = self.__dict__.copy() __a : List[str] = None __a : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' __a : int = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __a : Dict = {} __a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a __a : Optional[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ): '''simple docstring''' __a : Optional[int] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' __a : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __a : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' __a : 
Optional[int] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip() return out_string def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __a : Any = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi: __a : List[Any] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
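# Usage sketch, assuming the obfuscated tokenizer class above is XGLMTokenizer;
# the model id comes from the pretrained-vocab map in the file.
from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
ids = tokenizer("Hello world")["input_ids"]
print(tokenizer.decode(ids))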
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetBackbone', 'FocalNetModel', 'FocalNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
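# Usage sketch for the converter above. The paths are hypothetical placeholders,
# but the checkpoint basename must be one of ACCEPTABLE_CHECKPOINTS for the
# assert to pass:
#
#     convert_visual_bert_checkpoint("checkpoints/vqa_fine_tuned.th", "visual_bert_vqa")
#
# or, from the command line (script filename is a placeholder):
#
#     python convert_visual_bert_checkpoint.py checkpoints/vqa_fine_tuned.th visual_bert_vqa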
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : Dict = {'''configuration_timm_backbone''': ['''TimmBackboneConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int = ['''TimmBackbone'''] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
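# How the one-liner above works: the lambda receives the format string itself and
# substitutes it into its own `%r` slot (`%%` escapes to a literal `%`), so the
# printed text is a program whose output is itself. A quick fixed-point check:
import contextlib
import io

src = "print((lambda quine: quine %% quine)(%r))"
first_output = src % src  # what the one-liner prints

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    exec(first_output)  # run the printed program
assert buf.getvalue().strip() == first_output  # it reproduces itself exactly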
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters _SCREAMING_SNAKE_CASE : int = (7_20, 12_80) # Height, Width _SCREAMING_SNAKE_CASE : Optional[int] = (0.4, 0.6) # if height or width lower than this scale, drop it. _SCREAMING_SNAKE_CASE : List[Any] = 1 / 1_00 _SCREAMING_SNAKE_CASE : Tuple = "" _SCREAMING_SNAKE_CASE : List[str] = "" _SCREAMING_SNAKE_CASE : int = "" _SCREAMING_SNAKE_CASE : Dict = 2_50 def _UpperCamelCase ( ): """simple docstring""" __magic_name__ : Optional[int] = get_dataset(lowerCamelCase_ , lowerCamelCase_ ) for index in range(lowerCamelCase_ ): __magic_name__ : Optional[Any] = random.sample(range(len(lowerCamelCase_ ) ) , 4 ) __magic_name__ : int = update_image_and_anno( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , filter_scale=lowerCamelCase_ , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __magic_name__ : Optional[Any] = random_chars(32 ) __magic_name__ : Tuple = path.split(os.sep )[-1].rsplit("." , 1 )[0] __magic_name__ : Union[str, Any] = F"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}""" cva.imwrite(F"""{file_root}.jpg""" , lowerCamelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" ) __magic_name__ : List[Any] = [] for anno in new_annos: __magic_name__ : Optional[int] = anno[3] - anno[1] __magic_name__ : str = anno[4] - anno[2] __magic_name__ : Tuple = anno[1] + width / 2 __magic_name__ : Optional[int] = anno[2] + height / 2 __magic_name__ : Tuple = F"""{anno[0]} {x_center} {y_center} {width} {height}""" annos_list.append(lowerCamelCase_ ) with open(F"""{file_root}.txt""" , "w" ) as outfile: outfile.write("\n".join(line for line in annos_list ) ) def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" __magic_name__ : List[Any] = [] __magic_name__ : int = [] for label_file in glob.glob(os.path.join(lowerCamelCase_ , "*.txt" ) ): __magic_name__ : Union[str, Any] = label_file.split(os.sep )[-1].rsplit("." 
, 1 )[0] with open(lowerCamelCase_ ) as in_file: __magic_name__ : Tuple = in_file.readlines() __magic_name__ : Optional[int] = os.path.join(lowerCamelCase_ , F"""{label_name}.jpg""" ) __magic_name__ : Dict = [] for obj_list in obj_lists: __magic_name__ : List[Any] = obj_list.rstrip("\n" ).split(" " ) __magic_name__ : Any = float(obj[1] ) - float(obj[3] ) / 2 __magic_name__ : Optional[int] = float(obj[2] ) - float(obj[4] ) / 2 __magic_name__ : int = float(obj[1] ) + float(obj[3] ) / 2 __magic_name__ : Any = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(lowerCamelCase_ ) labels.append(lowerCamelCase_ ) return img_paths, labels def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0.0 , ): """simple docstring""" __magic_name__ : Optional[int] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) __magic_name__ : Union[str, Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __magic_name__ : List[str] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __magic_name__ : List[str] = int(scale_x * output_size[1] ) __magic_name__ : Dict = int(scale_y * output_size[0] ) __magic_name__ : Optional[int] = [] __magic_name__ : Optional[Any] = [] for i, index in enumerate(lowerCamelCase_ ): __magic_name__ : str = all_img_list[index] path_list.append(lowerCamelCase_ ) __magic_name__ : int = all_annos[index] __magic_name__ : List[Any] = cva.imread(lowerCamelCase_ ) if i == 0: # top-left __magic_name__ : List[Any] = cva.resize(lowerCamelCase_ , (divid_point_x, divid_point_y) ) __magic_name__ : Dict = img for bbox in img_annos: __magic_name__ : int = bbox[1] * scale_x __magic_name__ : Any = bbox[2] * scale_y __magic_name__ : int = bbox[3] * scale_x __magic_name__ : str = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right __magic_name__ : Tuple = cva.resize(lowerCamelCase_ , (output_size[1] - divid_point_x, divid_point_y) ) __magic_name__ : Dict = img for bbox in img_annos: __magic_name__ : List[str] = scale_x + bbox[1] * (1 - scale_x) __magic_name__ : Optional[Any] = bbox[2] * scale_y __magic_name__ : Union[str, Any] = scale_x + bbox[3] * (1 - scale_x) __magic_name__ : Dict = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left __magic_name__ : List[Any] = cva.resize(lowerCamelCase_ , (divid_point_x, output_size[0] - divid_point_y) ) __magic_name__ : Optional[Any] = img for bbox in img_annos: __magic_name__ : Optional[int] = bbox[1] * scale_x __magic_name__ : Optional[Any] = scale_y + bbox[2] * (1 - scale_y) __magic_name__ : Union[str, Any] = bbox[3] * scale_x __magic_name__ : List[str] = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right __magic_name__ : str = cva.resize( lowerCamelCase_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) __magic_name__ : Tuple = img for bbox in img_annos: __magic_name__ : Dict = scale_x + bbox[1] * (1 - scale_x) __magic_name__ : str = scale_y + bbox[2] * (1 - scale_y) __magic_name__ : Union[str, Any] = scale_x + bbox[3] * (1 - scale_x) __magic_name__ : List[str] = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: __magic_name__ : Optional[Any] = [ anno for anno in new_anno if filter_scale < 
(anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def _UpperCamelCase ( UpperCamelCase__ ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" __magic_name__ : Dict = ascii_lowercase + digits return "".join(random.choice(lowerCamelCase_ ) for _ in range(lowerCamelCase_ ) ) if __name__ == "__main__": main() print("DONE ✅")
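# Sanity sketch for the quadrant remapping used in update_image_and_anno above:
# for the top-right tile, normalized x is mapped from [0, 1] to [scale_x, 1] and
# normalized y to [0, scale_y], so a box covering the whole source image lands
# exactly on the top-right region of the mosaic.
scale_x, scale_y = 0.5, 0.5
bbox = [0, 0.0, 0.0, 1.0, 1.0]  # label, xmin, ymin, xmax, ymax (normalized)

xmin = scale_x + bbox[1] * (1 - scale_x)
ymin = bbox[2] * scale_y
xmax = scale_x + bbox[3] * (1 - scale_x)
ymax = bbox[4] * scale_y
assert (xmin, ymin, xmax, ymax) == (0.5, 0.0, 1.0, 0.5)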
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class _UpperCamelCase( __lowerCamelCase ): def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' __a : List[Any] = tempfile.mkdtemp() __a : int = 8 # DPR tok __a : Dict = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __a : int = os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok __a : str = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __a : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __a : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __a : List[str] = {'unk_token': '<unk>'} __a : Dict = os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['vocab_file'] ) __a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) ) def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def __lowerCAmelCase ( self : str ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) @require_tokenizers def __lowerCAmelCase ( self : Dict ): '''simple docstring''' __a : Tuple = os.path.join(self.tmpdirname , 'rag_tokenizer' ) __a : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) __a : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(SCREAMING_SNAKE_CASE__ ) rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) __a : List[Any] = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , 
config=SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE__ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE__ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' __a : Optional[Any] = RagTokenizer.from_pretrained('facebook/rag-token-nq' ) __a : List[Any] = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __a : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @slow def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' __a : Any = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' ) __a : Union[str, Any] = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __a : str = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
'''simple docstring''' import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowerCamelCase__ ( A : Dict , A : Optional[Any] , A : Dict ): '''simple docstring''' UpperCAmelCase = AutoConfig.from_pretrained(lowerCamelCase_ ) UpperCAmelCase = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCamelCase_ ) UpperCAmelCase = checkpoints.load_tax_checkpoint(lowerCamelCase_ ) UpperCAmelCase = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp'] if config.model_type == "t5": UpperCAmelCase = 'SelfAttention' if config.model_type == "longt5" and config.encoder_attention_type == "local": UpperCAmelCase = 'LocalSelfAttention' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCAmelCase = 'TransientGlobalSelfAttention' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): UpperCAmelCase = f"""layers_{str(lowerCamelCase_ )}""" # Self-Attention UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel'] UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel'] UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel'] UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel'] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCAmelCase = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale'] # Layer Normalization UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale'] if split_mlp_wi: UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel'] UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel'] else: UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel'] UpperCAmelCase = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization UpperCAmelCase = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning UpperCAmelCase = flax_model.params['encoder']['block'][str(lowerCamelCase_ )]['layer'] UpperCAmelCase = tax_attention_key UpperCAmelCase = tax_attention_out UpperCAmelCase = tax_attention_query UpperCAmelCase = tax_attention_value UpperCAmelCase = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCAmelCase = tax_global_layer_norm if split_mlp_wi: UpperCAmelCase = tax_mlp_wi_a UpperCAmelCase = tax_mlp_wi_a else: UpperCAmelCase = tax_mlp_wi UpperCAmelCase = tax_mlp_wo UpperCAmelCase = tax_mlp_layer_norm UpperCAmelCase = flax_model_encoder_layer_block # Only for layer 0: UpperCAmelCase = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T UpperCAmelCase = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCAmelCase = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T UpperCAmelCase = tax_encoder_global_rel_embedding # Assigning UpperCAmelCase = tax_model['target']['encoder']['encoder_norm']['scale'] UpperCAmelCase = tax_encoder_norm # Decoder for 
layer_index in range(config.num_layers ): UpperCAmelCase = f"""layers_{str(lowerCamelCase_ )}""" # Self-Attention UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel'] UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel'] UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel'] UpperCAmelCase = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel'] # Layer Normalization UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][ 'scale' ] # Encoder-Decoder-Attention UpperCAmelCase = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention'] UpperCAmelCase = tax_enc_dec_attention_module['key']['kernel'] UpperCAmelCase = tax_enc_dec_attention_module['out']['kernel'] UpperCAmelCase = tax_enc_dec_attention_module['query']['kernel'] UpperCAmelCase = tax_enc_dec_attention_module['value']['kernel'] # Layer Normalization UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale'] # MLP if split_mlp_wi: UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel'] UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel'] else: UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel'] UpperCAmelCase = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel'] # Layer Normalization UpperCAmelCase = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale'] # Assigning UpperCAmelCase = flax_model.params['decoder']['block'][str(lowerCamelCase_ )]['layer'] UpperCAmelCase = tax_attention_key UpperCAmelCase = tax_attention_out UpperCAmelCase = tax_attention_query UpperCAmelCase = tax_attention_value UpperCAmelCase = tax_pre_attention_layer_norm UpperCAmelCase = tax_enc_dec_attention_key UpperCAmelCase = tax_enc_dec_attention_out UpperCAmelCase = tax_enc_dec_attention_query UpperCAmelCase = tax_enc_dec_attention_value UpperCAmelCase = tax_cross_layer_norm if split_mlp_wi: UpperCAmelCase = tax_mlp_wi_a UpperCAmelCase = tax_mlp_wi_a else: UpperCAmelCase = tax_mlp_wi UpperCAmelCase = tax_mlp_wo UpperCAmelCase = txa_mlp_layer_norm UpperCAmelCase = flax_model_decoder_layer_block # Decoder Normalization UpperCAmelCase = tax_model['target']['decoder']['decoder_norm']['scale'] UpperCAmelCase = txa_decoder_norm # Only for layer 0: UpperCAmelCase = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T UpperCAmelCase = tax_decoder_rel_embedding # Token Embeddings UpperCAmelCase = tax_model['target']['token_embedder']['embedding'] UpperCAmelCase = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: UpperCAmelCase = tax_model['target']['decoder']['logits_dense']['kernel'] flax_model.save_pretrained(lowerCamelCase_ ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": _lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint.""" ) parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""") parser.add_argument( """--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model.""" ) _lowercase : Any = parser.parse_args() 
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
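# Invocation sketch for the converter above. The paths and config name are
# placeholders, and the function name follows the script's own trailing call,
# `convert_tax_checkpoint_to_flax`:
#
#     convert_tax_checkpoint_to_flax(
#         "/path/to/t5x_checkpoint", "google/long-t5-local-base", "long_t5_flax"
#     )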
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''bert_for_seq_generation''': ( '''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model''' ), } } SCREAMING_SNAKE_CASE__ = {'''bert_for_seq_generation''': 512} class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : List[int] = [] __SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask'''] def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<::::>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ): '''simple docstring''' __a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , ) __a : int = vocab_file __a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return self.sp_model.get_piece_size() def __lowerCAmelCase ( self : int ): '''simple docstring''' __a : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ): '''simple docstring''' __a : Union[str, Any] = self.__dict__.copy() __a : Any = None return state def __setstate__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' __a : str = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __a : str = {} __a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' __a : int = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ ) return token def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' __a : Optional[Any] = [] __a : Optional[int] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token __a : Dict = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE__ ) out_string += 
self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) return out_string.strip() def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __a : Tuple = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi: __a : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
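# Round-trip sketch for the tokenizer above. It assumes the de-obfuscated class is
# the upstream BertGenerationTokenizer (an assumption) and that the checkpoint in
# PRETRAINED_VOCAB_FILES_MAP is reachable:
#
#     from transformers import BertGenerationTokenizer
#
#     tok = BertGenerationTokenizer.from_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder"
#     )
#     ids = tok("sentencepiece round trip")["input_ids"]
#     print(tok.decode(ids))  # special tokens are decoded apart from the SP model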
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers lowerCamelCase : Tuple = float("nan") class A: '''simple docstring''' def __init__( self : List[Any] , A_ : Optional[int] ) -> Any: """simple docstring""" lowerCamelCase_ = sys.stdout lowerCamelCase_ = open(SCREAMING_SNAKE_CASE__ , 'a' ) def __getattr__( self : str , A_ : List[str] ) -> Dict: """simple docstring""" return getattr(self.stdout , SCREAMING_SNAKE_CASE__ ) def a__ ( self : List[str] , A_ : Dict ) -> Optional[Any]: """simple docstring""" self.stdout.write(SCREAMING_SNAKE_CASE__ ) # strip tqdm codes self.file.write(re.sub(r'^.*\r' , '' , SCREAMING_SNAKE_CASE__ , 0 , re.M ) ) def _SCREAMING_SNAKE_CASE ( lowercase : Dict=80 , lowercase : int=False ): '''simple docstring''' lowerCamelCase_ = [] # deal with critical env vars lowerCamelCase_ = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: lowerCamelCase_ = os.environ.get(lowerCamelCase_ , lowerCamelCase_ ) if val is not None: cmd.append(f"""{key}={val}""" ) # python executable (not always needed if the script is executable) lowerCamelCase_ = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(lowerCamelCase_ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes lowerCamelCase_ = [] lowerCamelCase_ = '' while len(lowerCamelCase_ ) > 0: current_line += f"""{cmd.pop(0 )} """ if len(lowerCamelCase_ ) == 0 or len(lowerCamelCase_ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(lowerCamelCase_ ) lowerCamelCase_ = '' return "\\\n".join(lowerCamelCase_ ) def _SCREAMING_SNAKE_CASE ( lowercase : List[Any] , lowercase : Tuple ): '''simple docstring''' lowerCamelCase_ = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own lowerCamelCase_ = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += f""" --output_dir {output_dir}""" # ensure we have --overwrite_output_dir lowerCamelCase_ = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _SCREAMING_SNAKE_CASE ( lowercase : Optional[Any] , lowercase : Union[str, Any] , lowercase : Optional[int] , lowercase : str , lowercase : Dict , lowercase : List[str] , lowercase : Union[str, Any] ): '''simple docstring''' if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , ) lowerCamelCase_ = subprocess.run(lowerCamelCase_ , 
capture_output=lowerCamelCase_ , text=lowerCamelCase_ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams lowerCamelCase_ = variation.replace(' ' , '-' ) with open(Path(lowerCamelCase_ ) / f"""log.{prefix}.stdout.txt""" , 'w' ) as f: f.write(result.stdout ) with open(Path(lowerCamelCase_ ) / f"""log.{prefix}.stderr.txt""" , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(f"""{output_dir}/all_results.json""" , 'r' , encoding='utf-8' ) as f: lowerCamelCase_ = json.load(lowerCamelCase_ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : Tuple , lowercase : Tuple , lowercase : Tuple , lowercase : List[str] , lowercase : List[Any] , lowercase : Tuple , lowercase : Union[str, Any] , lowercase : int , lowercase : Any , ): '''simple docstring''' lowerCamelCase_ = [] lowerCamelCase_ = [] lowerCamelCase_ = f"""{id}: {variation:<{longest_variation_len}}""" lowerCamelCase_ = f"""{preamble}: """ lowerCamelCase_ = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(lowerCamelCase_ ) , desc=lowerCamelCase_ , leave=lowerCamelCase_ ): lowerCamelCase_ = process_run_single( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) lowerCamelCase_ = single_run_metrics[target_metric_key] if not math.isnan(lowerCamelCase_ ): metrics.append(lowerCamelCase_ ) results.append(lowerCamelCase_ ) outcome += "✓" else: outcome += "✘" lowerCamelCase_ = f"""\33[2K\r{outcome}""" if len(lowerCamelCase_ ) > 0: lowerCamelCase_ = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} lowerCamelCase_ = round(mean_metrics[target_metric_key] , 2 ) lowerCamelCase_ = f"""{outcome} {mean_target}""" if len(lowerCamelCase_ ) > 1: results_str += f""" {tuple(round(lowerCamelCase_ , 2 ) for x in results )}""" print(lowerCamelCase_ ) lowerCamelCase_ = variation return mean_metrics else: print(lowerCamelCase_ ) return {variation_key: variation, target_metric_key: nan} def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = torch.cuda.get_device_properties(torch.device('cuda' ) ) return f""" Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB """ def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[Any] ): '''simple docstring''' lowerCamelCase_ = pd.DataFrame(lowerCamelCase_ ) lowerCamelCase_ = 'variation' lowerCamelCase_ = 'diff_%' lowerCamelCase_ = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan lowerCamelCase_ = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(lowerCamelCase_ ): # as a fallback, use the minimal value as the sentinel lowerCamelCase_ = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(lowerCamelCase_ ): lowerCamelCase_ = df.apply( lambda lowercase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 
, axis='columns' , ) # re-order columns lowerCamelCase_ = [variation_key, target_metric_key, diff_key, *report_metric_keys] lowerCamelCase_ = df.reindex(lowerCamelCase_ , axis='columns' ) # reorder cols # capitalize lowerCamelCase_ = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible lowerCamelCase_ = df.rename(lambda lowercase : c.replace('_' , '<br>' ) , axis='columns' ) lowerCamelCase_ = df.rename(lambda lowercase : c.replace('_' , '\n' ) , axis='columns' ) lowerCamelCase_ = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=lowerCamelCase_ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=lowerCamelCase_ , floatfmt='.2f' )] print('\n\n'.join(lowerCamelCase_ ) ) def _SCREAMING_SNAKE_CASE ( ): '''simple docstring''' lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Base cmd' , ) parser.add_argument( '--variations' , default=lowerCamelCase_ , type=lowerCamelCase_ , nargs='+' , required=lowerCamelCase_ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=lowerCamelCase_ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples' , ) parser.add_argument( '--repeat-times' , default=1 , type=lowerCamelCase_ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=lowerCamelCase_ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=lowerCamelCase_ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) lowerCamelCase_ = parser.parse_args() lowerCamelCase_ = args.output_dir Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ ) lowerCamelCase_ = get_base_command(lowerCamelCase_ , lowerCamelCase_ ) # split each dimension into its --foo variations lowerCamelCase_ = [list(map(str.strip , re.split(r'\|' , lowerCamelCase_ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty lowerCamelCase_ = list(map(str.strip , map(' '.join , itertools.product(*lowerCamelCase_ ) ) ) ) lowerCamelCase_ = max(len(lowerCamelCase_ ) for x in variations ) # split wanted keys lowerCamelCase_ = args.report_metric_keys.split() # capture prints into a log file for convenience lowerCamelCase_ = f"""benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt""" print(f"""\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt""" ) print(f"""and this script\'s output is also piped into {report_fn}""" ) lowerCamelCase_ = Tee(lowerCamelCase_ ) print(f"""\n*** Running {len(lowerCamelCase_ )} benchmarks:""" ) print(f"""Base command: {" ".join(lowerCamelCase_ )}""" ) lowerCamelCase_ = 'variation' lowerCamelCase_ = [] for id, variation in enumerate(tqdm(lowerCamelCase_ , desc='Total completion: ' , leave=lowerCamelCase_ ) ): lowerCamelCase_ = base_cmd + variation.split() results.append( process_run( id + 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , args.target_metric_key , lowerCamelCase_ , args.repeat_times , lowerCamelCase_ , args.verbose , ) ) process_results(lowerCamelCase_ , args.target_metric_key , lowerCamelCase_ , args.base_variation , lowerCamelCase_ ) if __name__ == "__main__": main()
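# How the benchmark expands --variations into concrete runs (this mirrors the
# re.split + itertools.product logic in main above):
import itertools
import re

variations_args = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]
dims = [list(map(str.strip, re.split(r"\|", x))) for x in variations_args]
variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
assert variations == [
    "--tf32 0", "--tf32 0 --fp16", "--tf32 0 --bf16",
    "--tf32 1", "--tf32 1 --fp16", "--tf32 1 --bf16",
]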
from ..utils import DummyObject, requires_backends


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
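# Usage note (illustrative): with these dummies registered, importing the library
# succeeds even when torch/transformers/onnxruntime are missing; the ImportError
# (with install instructions) is deferred until a dummy is actually touched, e.g.:
#
#     OnnxStableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")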
"""simple docstring""" import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { 'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json', 'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json', 'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json', } class __UpperCamelCase ( __lowerCamelCase ): lowercase_ : Union[str, Any] = '''owlvit_text_model''' def __init__( self : Optional[int] , UpperCAmelCase : Tuple=4_9408 , UpperCAmelCase : Optional[int]=512 , UpperCAmelCase : Optional[Any]=2048 , UpperCAmelCase : int=12 , UpperCAmelCase : Union[str, Any]=8 , UpperCAmelCase : int=16 , UpperCAmelCase : Dict="quick_gelu" , UpperCAmelCase : Any=1e-5 , UpperCAmelCase : str=0.0 , UpperCAmelCase : Tuple=0.0_2 , UpperCAmelCase : List[Any]=1.0 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : List[Any]=4_9406 , UpperCAmelCase : Union[str, Any]=4_9407 , **UpperCAmelCase : Dict , ) -> str: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :List[Any] = vocab_size lowerCAmelCase :Dict = hidden_size lowerCAmelCase :Optional[Any] = intermediate_size lowerCAmelCase :Any = num_hidden_layers lowerCAmelCase :List[Any] = num_attention_heads lowerCAmelCase :Optional[Any] = max_position_embeddings lowerCAmelCase :Dict = hidden_act lowerCAmelCase :str = layer_norm_eps lowerCAmelCase :List[str] = attention_dropout lowerCAmelCase :Optional[int] = initializer_range lowerCAmelCase :List[Any] = initializer_factor @classmethod def UpperCAmelCase__ ( cls : int , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : Any ) -> List[Any]: cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :Dict = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get('model_type' ) == "owlvit": lowerCAmelCase :Dict = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) class __UpperCamelCase ( __lowerCamelCase ): lowercase_ : str = '''owlvit_vision_model''' def __init__( self : Tuple , UpperCAmelCase : Union[str, Any]=768 , UpperCAmelCase : Dict=3072 , UpperCAmelCase : Dict=12 , UpperCAmelCase : List[str]=12 , UpperCAmelCase : Dict=3 , UpperCAmelCase : List[Any]=768 , UpperCAmelCase : int=32 , UpperCAmelCase : Tuple="quick_gelu" , UpperCAmelCase : List[Any]=1e-5 , UpperCAmelCase : List[str]=0.0 , UpperCAmelCase : List[str]=0.0_2 , UpperCAmelCase : Tuple=1.0 , **UpperCAmelCase : Tuple , ) -> int: super().__init__(**SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :str = hidden_size lowerCAmelCase :int = intermediate_size lowerCAmelCase :Dict = num_hidden_layers lowerCAmelCase :str = num_attention_heads lowerCAmelCase :Optional[int] = num_channels lowerCAmelCase :Tuple = image_size lowerCAmelCase :str = patch_size lowerCAmelCase :List[str] = hidden_act lowerCAmelCase :Dict = layer_norm_eps lowerCAmelCase :Union[str, Any] = attention_dropout lowerCAmelCase :int = initializer_range lowerCAmelCase :List[str] = initializer_factor @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : Dict ) -> Union[str, Any]: cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :int = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('model_type' ) == "owlvit": lowerCAmelCase :List[str] = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) class __UpperCamelCase ( __lowerCamelCase ): lowercase_ : Any = '''owlvit''' lowercase_ : str = True def __init__( self : str , UpperCAmelCase : List[Any]=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Any=512 , UpperCAmelCase : Optional[int]=2.6_5_9_2 , UpperCAmelCase : Dict=True , **UpperCAmelCase : str , ) -> int: super().__init__(**SCREAMING_SNAKE_CASE__ ) if text_config is None: lowerCAmelCase :int = {} logger.info('text_config is None. Initializing the OwlViTTextConfig with default values.' ) if vision_config is None: lowerCAmelCase :Optional[int] = {} logger.info('vision_config is None. initializing the OwlViTVisionConfig with default values.' 
) lowerCAmelCase :int = OwlViTTextConfig(**SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :str = OwlViTVisionConfig(**SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :str = projection_dim lowerCAmelCase :Union[str, Any] = logit_scale_init_value lowerCAmelCase :Union[str, Any] = return_dict lowerCAmelCase :int = 1.0 @classmethod def UpperCAmelCase__ ( cls : Any , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : Tuple ) -> Union[str, Any]: cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :int = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @classmethod def UpperCAmelCase__ ( cls : Dict , UpperCAmelCase : Dict , UpperCAmelCase : Dict , **UpperCAmelCase : Optional[int] ) -> str: lowerCAmelCase :Union[str, Any] = {} lowerCAmelCase :List[str] = text_config lowerCAmelCase :Tuple = vision_config return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> int: lowerCAmelCase :List[Any] = copy.deepcopy(self.__dict__ ) lowerCAmelCase :Optional[Any] = self.text_config.to_dict() lowerCAmelCase :Dict = self.vision_config.to_dict() lowerCAmelCase :Dict = self.__class__.model_type return output class __UpperCamelCase ( __lowerCamelCase ): @property def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ] ) @property def UpperCAmelCase__ ( self : Dict ) -> Tuple: return OrderedDict( [ ('logits_per_image', {0: 'batch'}), ('logits_per_text', {0: 'batch'}), ('text_embeds', {0: 'batch'}), ('image_embeds', {0: 'batch'}), ] ) @property def UpperCAmelCase__ ( self : str ) -> Union[str, Any]: return 1e-4 def UpperCAmelCase__ ( self : Optional[int] , UpperCAmelCase : "ProcessorMixin" , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : Optional["TensorType"] = None , ) -> Tuple: lowerCAmelCase :Tuple = super().generate_dummy_inputs( processor.tokenizer , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :Dict = super().generate_dummy_inputs( processor.image_processor , batch_size=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ ) return {**text_input_dict, **image_input_dict} @property def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: return 14
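# Composition sketch for the config classes above. The de-obfuscated names
# (OwlViTConfig.from_text_vision_configs taking plain dicts) follow the upstream
# transformers API and are an assumption about this file's original identifiers:
#
#     text_cfg = OwlViTTextConfig()              # defaults mirror owlvit-base
#     vision_cfg = OwlViTVisionConfig(patch_size=32)
#     config = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
#     print(config.projection_dim)               # 512 by default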
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year with Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
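# Worked check: for year = 2023, metonic_cycle = 9, leap_day_inhibits = 20,
# lunar_orbit_correction = 6, leap_day_reinstall_number = 5.0, so
# secular_moon_shift = 24 and century_starting_point = 5; then days_to_add =
# (19*9 + 24) % 30 = 15 and days_from_phm_to_sunday = (6 + 0 + 90 + 5) % 7 = 3,
# giving March 22 + 18 days = April 9, 2023, the actual date of Easter that year.
assert gauss_easter(2023) == datetime(2023, 4, 9)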
'''simple docstring'''
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
414
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
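A minimal usage sketch for the configuration above, assuming a recent `transformers` release that ships Informer; the series lengths below are illustrative, not values taken from this file.

# Usage sketch (lengths are illustrative assumptions).
from transformers import InformerConfig

config = InformerConfig(prediction_length=24, context_length=48, input_size=1)

# "hidden_size" is routed to "d_model" through `attribute_map`.
assert config.hidden_size == config.d_model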
47
0
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset


POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short",
        "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror",
        "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
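A usage sketch for the helpers above; the jsonl path and tokenizer checkpoint are placeholders, and the file is assumed to hold rows with `text`, `img`, and `label` fields as the dataset class expects.

# Usage sketch (hypothetical path and tokenizer choice).
from torch.utils.data import DataLoader
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
labels = get_mmimdb_labels()
dataset = JsonlDataset("path/to/train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=512)
loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)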
107
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify a different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
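For context, a standalone sketch of driving `DDIMParallelScheduler` outside the test harness; the tensor shape and the random stand-in for the UNet prediction are illustrative assumptions.

# Standalone sketch (shapes are illustrative assumptions).
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = torch.randn_like(sample)  # stand-in for a UNet prediction
    sample = scheduler.step(noise_pred, t, sample).prev_sample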
47
0
import argparse
import logging
import os

import datasets
import tensorflow as tf

from transformers import AutoTokenizer


logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
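A read-back sketch to sanity-check the shards written above; the feature spec mirrors `get_serialized_examples`, and the shard filename is a placeholder.

# Read-back sketch (shard path is a placeholder).
import tensorflow as tf

feature_spec = {
    "input_ids": tf.io.VarLenFeature(tf.int64),
    "attention_mask": tf.io.VarLenFeature(tf.int64),
}

ds = tf.data.TFRecordDataset("tf-tpu/train/dataset-0-1000.tfrecord")
parsed = ds.map(lambda record: tf.io.parse_single_example(record, feature_spec))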
372
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
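A worked example for the solver above: the system 2x + 3y = 6, 5x + 2y = 10 has determinant -11, determinant_x = -18 and determinant_y = -10, so x = 18/11 and y = 10/11.

# Worked example: 2x + 3y = 6 and 5x + 2y = 10.
x, y = cramers_rule_2x2([2, 3, 6], [5, 2, 10])
assert abs(x - 18 / 11) < 1e-9 and abs(y - 10 / 11) < 1e-9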
47
0
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __snake_case = logging.get_logger(__name__) __snake_case = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __snake_case = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __snake_case = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __snake_case = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __snake_case = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } __snake_case = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } __snake_case = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } __snake_case = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __snake_case = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __snake_case = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class _SCREAMING_SNAKE_CASE ( __lowerCamelCase ): """simple docstring""" _a : Dict = VOCAB_FILES_NAMES _a : List[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP _a : Tuple = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : Optional[int] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class _SCREAMING_SNAKE_CASE ( __lowerCamelCase ): """simple docstring""" _a : Any = VOCAB_FILES_NAMES _a : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP _a : List[Any] = 
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : List[Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __snake_case = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) __snake_case = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __snake_case = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(__lowerCamelCase ) class _SCREAMING_SNAKE_CASE : """simple docstring""" def __call__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = False , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Any: if titles is None and texts is None: return super().__call__( SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) elif titles is None or texts is None: lowercase__ : Dict = titles if texts is None else texts return super().__call__( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) lowercase__ : Optional[Any] = titles if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [titles] lowercase__ : List[Any] = texts if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [texts] lowercase__ : Dict = len(SCREAMING_SNAKE_CASE__ ) lowercase__ : Optional[Any] = questions if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else [questions] * n_passages if len(SCREAMING_SNAKE_CASE__ ) != len(SCREAMING_SNAKE_CASE__ ): raise ValueError( F'''There should be as many titles than texts but got {len(SCREAMING_SNAKE_CASE__ )} titles and {len(SCREAMING_SNAKE_CASE__ )} texts.''' ) lowercase__ : Union[str, Any] = super().__call__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ )['input_ids'] lowercase__ : List[str] = super().__call__(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , 
truncation=SCREAMING_SNAKE_CASE__ )['input_ids'] lowercase__ : str = { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ] } if return_attention_mask is not False: lowercase__ : Optional[int] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) lowercase__ : str = attention_mask return self.pad(SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 16 , lowerCamelCase__ = 64 , lowerCamelCase__ = 4 , ) -> List[Any]: lowercase__ : Optional[int] = reader_input['input_ids'] lowercase__ : Optional[Any] = reader_output[:3] lowercase__ : str = len(SCREAMING_SNAKE_CASE__ ) lowercase__ : List[Any] = sorted(range(SCREAMING_SNAKE_CASE__ ) , reverse=SCREAMING_SNAKE_CASE__ , key=relevance_logits.__getitem__ ) lowercase__ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: lowercase__ : Tuple = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence lowercase__ : Union[str, Any] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: lowercase__ : Optional[Any] = sequence_ids.index(self.pad_token_id ) else: lowercase__ : Any = len(SCREAMING_SNAKE_CASE__ ) lowercase__ : Tuple = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=SCREAMING_SNAKE_CASE__ , top_spans=SCREAMING_SNAKE_CASE__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=SCREAMING_SNAKE_CASE__ , start_index=SCREAMING_SNAKE_CASE__ , end_index=SCREAMING_SNAKE_CASE__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(SCREAMING_SNAKE_CASE__ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> Tuple: lowercase__ : str = [] for start_index, start_score in enumerate(SCREAMING_SNAKE_CASE__ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) lowercase__ : Dict = sorted(SCREAMING_SNAKE_CASE__ , key=lambda lowerCamelCase__ : x[1] , reverse=SCREAMING_SNAKE_CASE__ ) lowercase__ : int = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' ) lowercase__ : Tuple = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if 
len(SCREAMING_SNAKE_CASE__ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(__lowerCamelCase ) class _SCREAMING_SNAKE_CASE ( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" _a : Any = VOCAB_FILES_NAMES _a : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP _a : Union[str, Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : Dict = READER_PRETRAINED_INIT_CONFIGURATION _a : int = ['''input_ids''', '''attention_mask''']
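A usage sketch for the reader tokenizer, following the publicly documented DPR example; the question, title, and passage strings are illustrative, and the checkpoint is the public reader checkpoint on the Hub.

# Usage sketch (strings are illustrative).
from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

encoded_inputs = tokenizer(
    questions=["What is love?"],
    titles=["Haddaway"],
    texts=["'What Is Love' is a song recorded by the artist Haddaway"],
    return_tensors="pt",
)
outputs = model(**encoded_inputs)
predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
print(predicted_spans[0].text)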
200
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
47
0
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
250
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
47
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a_ = {'processing_layoutxlm': ['LayoutXLMProcessor']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['LayoutXLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['LayoutXLMTokenizerFast'] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """
    Capitalizes the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("123 hello world")
    '123 hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart; non-letters pass through.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
0
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowerCAmelCase : Optional[Any] = { '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[int] = ['''VivitImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[int] = [ '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''VivitModel''', '''VivitPreTrainedModel''', '''VivitForVideoClassification''', ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys __lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
58
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
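A quick check of the configuration above: `inputs_to_logits_ratio` is the product of the convolutional strides, which comes to 320 for the defaults.

# Sketch: the default stride stack (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
# downsamples the waveform by 5 * 2**6 = 320 samples per output frame.
config = SEWDConfig()
assert config.inputs_to_logits_ratio == 320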
47
0
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Converts a string of roman numerals to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generates the minimal roman numeral string for a given integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Sums the characters saved by rewriting each numeral in its minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
436
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """Heap index of the parent of the node at ``position``."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Heap index of the left child of the node at ``position``."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Heap index of the right child of the node at ``position``."""
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue with a position map for O(log n) key updates."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as nested adjacency dictionaries."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Return (dist, parent) describing a minimum spanning tree of ``graph``."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
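# Minimal usage sketch for the classes above (illustrative, added for clarity):
if __name__ == "__main__":
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 3)
    g.add_edge(2, 3, 1)
    g.add_edge(1, 3, 7)
    dist, parent = prims_algo(g)
    # The MST keeps edges 1-2 (weight 3) and 2-3 (weight 1), dropping 1-3 (weight 7).
    assert parent == {1: None, 2: 1, 3: 2}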
47
0
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCamelCase__( __lowerCamelCase ): def a__( self : Optional[int] )-> str: """simple docstring""" UpperCAmelCase = SMALL_MODEL_IDENTIFIER UpperCAmelCase = 'pt' UpperCAmelCase = 'tf' def a__( self : Tuple , lowerCAmelCase : str )-> str: """simple docstring""" UpperCAmelCase = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(SCREAMING_SNAKE_CASE__ ) def a__( self : Optional[int] , lowerCAmelCase : Optional[Any] )-> List[Any]: """simple docstring""" UpperCAmelCase = TFAutoModel.from_pretrained(self.test_model , from_pt=SCREAMING_SNAKE_CASE__ ) model_tf.save_pretrained(SCREAMING_SNAKE_CASE__ ) def a__( self : Dict )-> List[str]: """simple docstring""" UpperCAmelCase = 'mock_framework' # Framework provided - return whatever the user provides UpperCAmelCase = FeaturesManager.determine_framework(self.test_model , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def a__( self : int )-> int: """simple docstring""" with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase = FeaturesManager.determine_framework(SCREAMING_SNAKE_CASE__ ) def a__( self : str )-> List[str]: """simple docstring""" UpperCAmelCase = MagicMock(return_value=SCREAMING_SNAKE_CASE__ ) with patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE__ ): UpperCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(SCREAMING_SNAKE_CASE__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow UpperCAmelCase = MagicMock(return_value=SCREAMING_SNAKE_CASE__ ) with patch('''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE__ ): UpperCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(SCREAMING_SNAKE_CASE__ , self.framework_tf ) # Both in environment -> use PyTorch UpperCAmelCase = MagicMock(return_value=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase = MagicMock(return_value=SCREAMING_SNAKE_CASE__ ) with 
patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE__ ), patch( '''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE__ ): UpperCAmelCase = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(SCREAMING_SNAKE_CASE__ , self.framework_pt ) # Both not in environment -> raise error UpperCAmelCase = MagicMock(return_value=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase = MagicMock(return_value=SCREAMING_SNAKE_CASE__ ) with patch('''transformers.onnx.features.is_tf_available''' , SCREAMING_SNAKE_CASE__ ), patch( '''transformers.onnx.features.is_torch_available''' , SCREAMING_SNAKE_CASE__ ): with self.assertRaises(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase = FeaturesManager.determine_framework(self.test_model )
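# In short, what the tests above pin down (sketch, assuming only the
# transformers.onnx API the tests import): FeaturesManager.determine_framework
# prefers an explicitly requested framework, then the checkpoint type found on
# disk, then whichever of PyTorch/TensorFlow is installed, with PyTorch winning
# when both are present:
#
#   FeaturesManager.determine_framework(model_id, "tf")   # -> "tf" (explicit)
#   FeaturesManager.determine_framework(local_pt_ckpt)    # -> "pt" (checkpoint type)
#   FeaturesManager.determine_framework(model_id)         # -> "pt" if torch is installed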
210
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree parameterised over an associative combining function."""

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        self.root = None
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set collection position ``i`` to ``val`` and refresh the tree."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Combine the values over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range spans left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range entirely in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield the nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
47
0
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    """Difference between the hypothesis output and the actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    """Linear hypothesis value: bias plus the weighted input features."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Actual output for the given example from the chosen data set."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Hypothesis output for the given example from the chosen data set."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum of error terms over the training set; index == -1 is the bias term."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
70
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int SCREAMING_SNAKE_CASE__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _UpperCamelCase( datasets.BuilderConfig ): __SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None def UpperCAmelCase__ ( lowerCamelCase_ : "pyspark.sql.DataFrame" , lowerCamelCase_ : List[int] , ): import pyspark def generate_fn(): __a : List[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: __a : Optional[int] = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' ) __a : Optional[Any] = partition_df.collect() __a : Union[str, Any] = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class _UpperCamelCase( _BaseExamplesIterable ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : Dict=None , ): '''simple docstring''' __a : List[str] = df __a : Tuple = partition_order or range(self.df.rdd.getNumPartitions() ) __a : List[Any] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Tuple ): '''simple docstring''' yield from self.generate_examples_fn() def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.random.Generator ): '''simple docstring''' __a : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(SCREAMING_SNAKE_CASE__ ) return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : Union[str, Any] = self.split_shard_indices_by_worker(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Dict ): '''simple docstring''' return len(self.partition_order ) class _UpperCamelCase( datasets.DatasetBuilder ): __SCREAMING_SNAKE_CASE : List[str] = SparkConfig def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : str = None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ): '''simple docstring''' import pyspark __a : int = pyspark.sql.SparkSession.builder.getOrCreate() __a : Optional[int] = df __a : List[Any] = working_dir super().__init__( cache_dir=SCREAMING_SNAKE_CASE__ , config_name=str(self.df.semanticHash() ) , **SCREAMING_SNAKE_CASE__ , ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' def create_cache_and_write_probe(SCREAMING_SNAKE_CASE__ : List[str] ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. 
os.makedirs(self._cache_dir , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : List[Any] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(SCREAMING_SNAKE_CASE__ , 'a' ) return [probe_file] if self._spark.conf.get('spark.master' , '' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: __a : List[Any] = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(SCREAMING_SNAKE_CASE__ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def __lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : datasets.download.download_manager.DownloadManager ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' import pyspark def get_arrow_batch_size(SCREAMING_SNAKE_CASE__ : int ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) __a : List[str] = self.df.count() __a : Dict = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. __a : List[str] = ( self.df.limit(SCREAMING_SNAKE_CASE__ ) .repartition(1 ) .mapInArrow(SCREAMING_SNAKE_CASE__ , 'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) __a : Dict = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. __a : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ , int(approx_total_size / max_shard_size ) ) __a : int = self.df.repartition(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , ): '''simple docstring''' import pyspark __a : Any = ParquetWriter if file_format == 'parquet' else ArrowWriter __a : Union[str, Any] = os.path.join(self._working_dir , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) if self._working_dir else fpath __a : Optional[int] = file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. __a : List[str] = self.config.features __a : int = self._writer_batch_size __a : Union[str, Any] = self._fs.storage_options def write_arrow(SCREAMING_SNAKE_CASE__ : Optional[int] ): # Within the same SparkContext, no two task attempts will share the same attempt ID. __a : Any = pyspark.TaskContext().taskAttemptId() __a : str = next(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , ) __a : Any = 0 __a : List[str] = writer_class( features=SCREAMING_SNAKE_CASE__ , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , ) __a : Optional[Any] = pa.Table.from_batches([first_batch] ) writer.write_table(SCREAMING_SNAKE_CASE__ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: __a , __a : Optional[int] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) shard_id += 1 __a : Optional[Any] = writer_class( features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , ) __a : Union[str, Any] = pa.Table.from_batches([batch] ) writer.write_table(SCREAMING_SNAKE_CASE__ ) if writer._num_bytes > 0: __a , __a : str = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(SCREAMING_SNAKE_CASE__ ) ): __a : Any = os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE__ ) , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) shutil.move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : Dict = ( self.df.mapInArrow(SCREAMING_SNAKE_CASE__ , 'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , SCREAMING_SNAKE_CASE__ : str = "arrow" , SCREAMING_SNAKE_CASE__ : Optional[Union[str, int]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ): '''simple docstring''' self._validate_cache_dir() __a : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = not is_remote_filesystem(self._fs ) __a : Optional[Any] = os.path.join if is_local else posixpath.join __a : Any = '-TTTTT-SSSSS-of-NNNNN' __a : Union[str, Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' __a : Any = path_join(self._output_dir , SCREAMING_SNAKE_CASE__ ) __a : Any = 0 __a : Dict = 0 __a : int = 0 __a : List[str] = [] __a : Optional[int] = [] for task_id, content in self._prepare_split_single(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[int] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(SCREAMING_SNAKE_CASE__ ) __a : 
List[str] = total_num_examples __a : Optional[int] = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: __a : Any = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. __a : Dict = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , ): rename( SCREAMING_SNAKE_CASE__ , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , ) __a : Union[str, Any] = [] __a : List[str] = 0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): __a , __a : Union[str, Any] = task_id_and_num_shards[i] for shard_id in range(SCREAMING_SNAKE_CASE__ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ).map(lambda SCREAMING_SNAKE_CASE__ : _rename_shard(*SCREAMING_SNAKE_CASE__ ) ).collect() else: # don't use any pattern __a : List[Any] = 0 __a : Any = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SCREAMING_SNAKE_CASE__ , '' ) , ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , ): '''simple docstring''' return SparkExamplesIterable(self.df )
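# How this builder is reached in practice (illustrative sketch;
# Dataset.from_spark is the public entry point in the `datasets` library, and
# `spark` is assumed to be an existing SparkSession):
#
#   from datasets import Dataset
#   df = spark.createDataFrame([("a", 1), ("b", 2)], ["text", "label"])
#   ds = Dataset.from_spark(df)  # instantiates the Spark builder defined above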
47
0
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def UpperCAmelCase ( a__ , a__=10 ): '''simple docstring''' lowerCAmelCase :Optional[Any] = [] for _ in range(lowerCamelCase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def UpperCAmelCase ( a__ , a__=10 ): '''simple docstring''' lowerCAmelCase :Union[str, Any] = [] for step in range(lowerCamelCase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase :List[Any] = os.path.join(lowerCamelCase_ , 'schedule.bin' ) torch.save(scheduler.state_dict() , lowerCamelCase_ ) lowerCAmelCase :Tuple = torch.load(lowerCamelCase_ ) scheduler.load_state_dict(lowerCamelCase_ ) return lrs @require_torch class __UpperCamelCase ( unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> str: self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: lowerCAmelCase :Union[str, Any] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :List[str] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase :List[Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase :List[Any] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): lowerCAmelCase :int = criterion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def UpperCAmelCase__ ( self : List[str] ) -> str: lowerCAmelCase :List[str] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase :Union[str, Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase :int = Adafactor( params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=SCREAMING_SNAKE_CASE__ , weight_decay=0.0 , relative_step=SCREAMING_SNAKE_CASE__ , scale_parameter=SCREAMING_SNAKE_CASE__ , warmup_init=SCREAMING_SNAKE_CASE__ , ) for _ in range(1000 ): lowerCAmelCase :str = criterion(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __UpperCamelCase ( unittest.TestCase ): lowercase_ : Union[str, Any] = nn.Linear(50 , 50 ) if is_torch_available() else None lowercase_ : str = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None lowercase_ : str = 10 def UpperCAmelCase__ ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Dict=None ) -> List[str]: self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ , msg=SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__ ( self : Any ) -> List[str]: lowerCAmelCase :str = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) lowerCAmelCase :Any = { get_constant_schedule: ({}, [1_0.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4], ), } for scheduler_func, data in scheds.items(): lowerCAmelCase :str = data lowerCAmelCase :Tuple = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) lowerCAmelCase :List[Any] = unwrap_schedule(SCREAMING_SNAKE_CASE__ , self.num_steps ) self.assertListAlmostEqual( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , ) lowerCAmelCase :Dict = scheduler_func(self.optimizer , **SCREAMING_SNAKE_CASE__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(SCREAMING_SNAKE_CASE__ ) # wrap to test picklability of the schedule lowerCAmelCase :Optional[int] = unwrap_and_save_reload_schedule(SCREAMING_SNAKE_CASE__ , self.num_steps ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , msg=f"""failed for {scheduler_func} in save and reload""" ) class __UpperCamelCase : def __init__( self : Dict , UpperCAmelCase : str ) -> Optional[int]: lowerCAmelCase :str = fn def __call__( self : int , *UpperCAmelCase : Dict , **UpperCAmelCase : Union[str, Any] ) -> Dict: return self.fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @classmethod def UpperCAmelCase__ ( self : str , UpperCAmelCase : str ) -> Optional[int]: lowerCAmelCase :List[Any] = list(map(self , scheduler.lr_lambdas ) )
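# Minimal runnable sketch of one schedule exercised above (assumes only that
# torch and transformers are installed; values mirror the test's expected list):
import torch
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
lrs = []
for _ in range(10):
    lrs.append(scheduler.get_lr()[0])
    scheduler.step()
# warms up linearly to the peak lr over 2 steps, then decays linearly toward 0
assert [round(lr, 2) for lr in lrs] == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]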
553
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : int ): # save results if os.path.exists(lowerCamelCase_ ): if os.path.exists(os.path.join(lowerCamelCase_ , 'config.json' ) ) and os.path.isfile( os.path.join(lowerCamelCase_ , 'config.json' ) ): os.remove(os.path.join(lowerCamelCase_ , 'config.json' ) ) if os.path.exists(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ): os.remove(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ) else: os.makedirs(lowerCamelCase_ ) model.save_pretrained(lowerCamelCase_ ) def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Any=False ): __a : Dict = 2 if unlogit: __a : Optional[Any] = torch.pow(lowerCamelCase_ , lowerCamelCase_ ) __a : Any = p * torch.log(lowerCamelCase_ ) __a : Union[str, Any] = 0 return -plogp.sum(dim=-1 ) def UpperCAmelCase__ ( lowerCamelCase_ : Any ): logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(lowerCamelCase_ ) ) ) ) for row in range(len(lowerCamelCase_ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : int=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : List[Any]=False ): __a , __a : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads __a : str = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) __a : int = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) if head_mask is None: __a : Union[str, Any] = torch.ones(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCamelCase_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: __a : Any = None __a : Optional[int] = 0.0 __a : Optional[Any] = 0.0 for step, inputs in enumerate(tqdm(lowerCamelCase_ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): __a : Dict = tuple(t.to(args.device ) for t in inputs ) ((__a) , ) : Dict = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) __a : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ , head_mask=lowerCamelCase_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) __a , __a , __a : int = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCamelCase_ ): __a : List[str] = entropy(attn.detach() , lowerCamelCase_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCamelCase_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= 
tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: __a : Optional[Any] = 2 __a : Union[str, Any] = torch.pow(torch.pow(lowerCamelCase_ , lowerCamelCase_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: __a : List[str] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(lowerCamelCase_ ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(lowerCamelCase_ ) logger.info('Head ranked by importance scores' ) __a : Optional[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) __a : str = torch.arange( head_importance.numel() , device=args.device ) __a : Tuple = head_ranks.view_as(lowerCamelCase_ ) print_ad_tensor(lowerCamelCase_ ) return attn_entropy, head_importance, total_loss def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ): __a , __a , __a : Optional[int] = compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ ) __a : Tuple = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , lowerCamelCase_ , original_score * args.masking_threshold ) __a : Tuple = torch.ones_like(lowerCamelCase_ ) __a : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) __a : Tuple = original_score while current_score >= original_score * args.masking_threshold: __a : Optional[Any] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads __a : List[str] = float('Inf' ) __a : List[Any] = head_importance.view(-1 ).sort()[1] if len(lowerCamelCase_ ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads __a : Any = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) __a : int = new_head_mask.view(-1 ) __a : Tuple = 0.0 __a : int = new_head_mask.view_as(lowerCamelCase_ ) __a : Optional[int] = new_head_mask.clone().detach() print_ad_tensor(lowerCamelCase_ ) # Compute metric and head importance again __a , __a , __a : int = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , head_mask=lowerCamelCase_ ) __a : List[Any] = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , lowerCamelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , ) logger.info('Final head mask' ) print_ad_tensor(lowerCamelCase_ ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ): __a : List[Any] = datetime.now() __a , __a , __a : List[str] = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ ) __a : List[str] = 1 / loss __a : List[Any] = datetime.now() - before_time __a : List[str] = sum(p.numel() for p in model.parameters() ) __a : Dict = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCamelCase_ ) ) } for k, v in heads_to_prune.items(): 
if isinstance(lowerCamelCase_ , lowerCamelCase_ ): __a : Tuple = [ v, ] assert sum(len(lowerCamelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowerCamelCase_ ) __a : Optional[Any] = sum(p.numel() for p in model.parameters() ) __a : Tuple = datetime.now() __a , __a , __a : Tuple = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ , actually_pruned=lowerCamelCase_ , ) __a : Optional[Any] = 1 / loss __a : List[Any] = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , lowerCamelCase_ , lowerCamelCase_ , pruned_num_params / original_num_params * 1_0_0 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , lowerCamelCase_ , lowerCamelCase_ ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 ) save_model(lowerCamelCase_ , args.output_dir ) def UpperCAmelCase__ ( ): __a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=lowerCamelCase_ , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=lowerCamelCase_ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=lowerCamelCase_ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=lowerCamelCase_ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=lowerCamelCase_ , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=lowerCamelCase_ , help='Metric to use for head masking.' 
) parser.add_argument( '--max_seq_length' , default=1_2_8 , type=lowerCamelCase_ , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=lowerCamelCase_ , help='Batch size.' ) parser.add_argument('--seed' , type=lowerCamelCase_ , default=4_2 ) parser.add_argument('--local_rank' , type=lowerCamelCase_ , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' ) __a : Optional[Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase_ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: __a : List[str] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) __a : Tuple = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) __a : Union[str, Any] = torch.device('cuda' , args.local_rank ) __a : Any = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) __a : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: __a : List[Any] = nn.parallel.DistributedDataParallel( lowerCamelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCamelCase_ ) elif args.n_gpu > 1: __a : Union[str, Any] = nn.DataParallel(lowerCamelCase_ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=lowerCamelCase_ ) torch.save(lowerCamelCase_ , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , lowerCamelCase_ ) # Prepare dataset __a : Tuple = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) __a : str = (torch.from_numpy(lowerCamelCase_ ),) __a : List[str] = TensorDataset(*lowerCamelCase_ ) __a : Optional[Any] = RandomSampler(lowerCamelCase_ ) __a : Union[str, Any] = DataLoader(lowerCamelCase_ , sampler=lowerCamelCase_ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: __a : Union[str, Any] = mask_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) prune_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
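# The script above is CLI-driven; an illustrative invocation (the script name
# here is hypothetical -- this row ships only the module body -- but every flag
# comes from the argparse definitions above):
#
#   python run_gpt2_bertology.py \
#       --model_name_or_path gpt2 \
#       --data_dir ./tokenized_ids.txt \
#       --output_dir ./bertology_out \
#       --try_masking --masking_threshold 0.9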
47
0
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    """Graph vertex with Prim bookkeeping (key and parent pi)."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex.id : distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    """Add an undirected edge of weight ``edge`` between 1-indexed nodes a and b."""
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum; O(V^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm backed by a binary heap; O(E log V)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    # empty in this dataset row; the doctest body was elided
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
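# Usage sketch (illustrative): a 4-cycle with one heavy chord; both variants
# recover the same minimum spanning tree.
#
#   graph = [Vertex(n) for n in range(4)]   # ids "0".."3" stand for nodes 1..4
#   connect(graph, 1, 2, 1)
#   connect(graph, 2, 3, 2)
#   connect(graph, 3, 4, 1)
#   connect(graph, 1, 4, 4)
#   prim(graph, graph[0])              # -> [(2, 1), (3, 2), (4, 3)]
#   list(prim_heap(graph, graph[0]))   # -> same edges, heap-based variant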
414
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('''0.8.3'''): raise Exception('''requires gluonnlp == 0.8.3''') if version.parse(mx.__version__) != version.parse('''1.5.0'''): raise Exception('''requires mxnet == 1.5.0''') logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = '''The Nymphenburg Palace is a beautiful palace in Munich!''' def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : str ): __a : List[Any] = { 'attention_cell': 'multi_head', 'num_layers': 4, 'units': 1_0_2_4, 'hidden_size': 7_6_8, 'max_length': 5_1_2, 'num_heads': 8, 'scaled': True, 'dropout': 0.1, 'use_residual': True, 'embed_size': 1_0_2_4, 'embed_dropout': 0.1, 'word_embed': None, 'layer_norm_eps': 1e-5, 'token_type_vocab_size': 2, } __a : Optional[int] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __a : List[str] = BERTEncoder( attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowerCamelCase_ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __a : int = 'openwebtext_ccnews_stories_books_cased' # Specify download folder to Gluonnlp's vocab __a : Optional[Any] = os.path.join(get_home_dir() , 'models' ) __a : Optional[Any] = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ ) __a : Any = nlp.model.BERTModel( lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , ) original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ ) __a : Dict = original_bort._collect_params_with_prefix() # Build our config 🤗 __a : Optional[Any] = { 'architectures': ['BertForMaskedLM'], 'attention_probs_dropout_prob': predefined_args['dropout'], 'hidden_act': 'gelu', 'hidden_dropout_prob': predefined_args['dropout'], 'hidden_size': predefined_args['embed_size'], 'initializer_range': 0.02, 'intermediate_size': predefined_args['hidden_size'], 'layer_norm_eps': predefined_args['layer_norm_eps'], 
'max_position_embeddings': predefined_args['max_length'], 'model_type': 'bort', 'num_attention_heads': predefined_args['num_heads'], 'num_hidden_layers': predefined_args['num_layers'], 'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa 'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa 'vocab_size': len(lowerCamelCase_ ), } __a : str = BertConfig.from_dict(lowerCamelCase_ ) __a : Optional[int] = BertForMaskedLM(lowerCamelCase_ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase_ : Optional[Any] ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] ): __a : Optional[int] = hf_param.shape __a : int = to_torch(params[gluon_param] ) __a : int = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param __a : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' ) __a : str = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' ) __a : Tuple = check_and_map_params( 
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' ) __a : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __a : Union[str, Any] = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __a : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention __a : BertSelfAttention = layer.attention.self __a : Optional[int] = check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) __a : str = check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) __a : List[str] = check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) __a : str = check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) __a : Dict = check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) __a : str = check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output __a : BertSelfOutput = layer.attention.output __a : Tuple = check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) __a : Dict = check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) __a : Optional[Any] = check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) __a : Optional[Any] = check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate __a : BertIntermediate = layer.intermediate __a : List[str] = check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) __a : Optional[Any] = check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output __a : BertOutput = layer.output __a : str = check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) __a : List[Any] = check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) __a : str = check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) __a : List[str] = check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models __a : Union[str, Any] = RobertaTokenizer.from_pretrained('roberta-base' ) __a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ )['input_ids'] # Get gluon output __a : Optional[int] = mx.nd.array([input_ids] ) __a : Tuple = original_bort(inputs=lowerCamelCase_ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase_ ) __a : Optional[Any] = BertModel.from_pretrained(lowerCamelCase_ ) hf_bort_model.eval() __a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ , return_tensors='pt' ) __a : int = hf_bort_model(**lowerCamelCase_ )[0] __a : Dict = output_gluon[0].asnumpy() __a : str = output_hf[0].detach().numpy() __a : List[Any] 
= np.max(np.abs(hf_layer - gluon_layer ) ).item() __a : str = np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) if success: print('✔️ Both models output the same tensors' ) else: print('❌ Both models do **NOT** output the same tensors' ) print('Absolute difference is:' , lowerCamelCase_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official Bort params file.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
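# A minimal, self-contained sketch (not part of the conversion script above) of the
# shape-checked parameter-copy pattern that `check_and_map_params` implements: verify
# the source tensor's shape against the destination before wrapping it in an
# nn.Parameter. The names `copy_param` and `src_state` are illustrative, not from the script.
import numpy as np
import torch
from torch import nn


def copy_param(dst_param: nn.Parameter, src_array: np.ndarray) -> nn.Parameter:
    # Fail loudly on mismatched shapes instead of letting broadcasting hide a mapping bug.
    assert tuple(dst_param.shape) == tuple(src_array.shape), (
        f"shape mismatch: source {tuple(src_array.shape)} vs destination {tuple(dst_param.shape)}"
    )
    return nn.Parameter(torch.from_numpy(src_array).float())


linear = nn.Linear(4, 2)
src_state = {"proj.weight": np.ones((2, 4), dtype=np.float32)}
linear.weight = copy_param(linear.weight, src_state["proj.weight"])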
'''simple docstring''' import re import string import numpy as np import datasets _UpperCAmelCase : Tuple = ''' Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. ''' _UpperCAmelCase : List[str] = ''' Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all digits before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 25.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 50.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 75.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results["exact_match"], 1)) 100.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."] >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 33.3 ''' _UpperCAmelCase : List[str] = ''' ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): """simple docstring""" def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': datasets.Value('string', id='sequence' ), 'references': datasets.Value('string', id='sequence' ), } ), reference_urls=[], ) def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ :
int, UpperCamelCase__ : Tuple, UpperCamelCase__ : str=None, UpperCamelCase__ : List[Any]=False, UpperCamelCase__ : Optional[int]=False, UpperCamelCase__ : List[str]=False, ) -> List[Any]: if regexes_to_ignore is not None: for s in regexes_to_ignore: _A = np.array([re.sub(SCREAMING_SNAKE_CASE__, '', SCREAMING_SNAKE_CASE__ ) for x in predictions] ) _A = np.array([re.sub(SCREAMING_SNAKE_CASE__, '', SCREAMING_SNAKE_CASE__ ) for x in references] ) else: _A = np.asarray(SCREAMING_SNAKE_CASE__ ) _A = np.asarray(SCREAMING_SNAKE_CASE__ ) if ignore_case: _A = np.char.lower(SCREAMING_SNAKE_CASE__ ) _A = np.char.lower(SCREAMING_SNAKE_CASE__ ) if ignore_punctuation: _A = string.punctuation.maketrans('', '', string.punctuation ) _A = np.char.translate(SCREAMING_SNAKE_CASE__, table=SCREAMING_SNAKE_CASE__ ) _A = np.char.translate(SCREAMING_SNAKE_CASE__, table=SCREAMING_SNAKE_CASE__ ) if ignore_numbers: _A = string.digits.maketrans('', '', string.digits ) _A = np.char.translate(SCREAMING_SNAKE_CASE__, table=SCREAMING_SNAKE_CASE__ ) _A = np.char.translate(SCREAMING_SNAKE_CASE__, table=SCREAMING_SNAKE_CASE__ ) _A = predictions == references return {"exact_match": np.mean(SCREAMING_SNAKE_CASE__ ) * 1_00}
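# A standalone sketch of the normalization pipeline the metric applies before the
# elementwise comparison above (lowercasing and punctuation stripping via np.char).
# The function name `exact_match_rate` is ours, for illustration only.
import string

import numpy as np


def exact_match_rate(predictions, references, ignore_case=True, ignore_punctuation=True):
    preds, refs = np.asarray(predictions), np.asarray(references)
    if ignore_case:
        preds, refs = np.char.lower(preds), np.char.lower(refs)
    if ignore_punctuation:
        table = str.maketrans("", "", string.punctuation)
        preds = np.char.translate(preds, table=table)
        refs = np.char.translate(refs, table=table)
    return float(np.mean(preds == refs) * 100)


print(exact_match_rate(["Cat!", "dog"], ["cat", "bird"]))  # 50.0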
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ): __a : Any = '' for i in table: res += inp[i - 1] return res def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] ): return data[1:] + data[0] def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] ): __a : Optional[int] = '' for i in range(len(lowerCamelCase_ ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ): __a : List[str] = int('0b' + data[0] + data[-1] , 2 ) __a : List[str] = int('0b' + data[1:3] , 2 ) return bin(s[row][col] )[2:] def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ): __a : List[Any] = message[:4] __a : str = message[4:] __a : Any = apply_table(lowerCamelCase_ , lowerCamelCase_ ) __a : int = xor(lowerCamelCase_ , lowerCamelCase_ ) __a : Dict = apply_sbox(lowerCamelCase_ , temp[:4] ) # noqa: E741 __a : Tuple = apply_sbox(lowerCamelCase_ , temp[4:] ) __a : List[Any] = '0' * (2 - len(lowerCamelCase_ )) + l # noqa: E741 __a : List[str] = '0' * (2 - len(lowerCamelCase_ )) + r __a : List[Any] = apply_table(l + r , lowerCamelCase_ ) __a : Dict = xor(lowerCamelCase_ , lowerCamelCase_ ) return temp + right if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = input('''Enter 10 bit key: ''') SCREAMING_SNAKE_CASE__ = input('''Enter 8 bit message: ''') SCREAMING_SNAKE_CASE__ = [6, 3, 7, 4, 8, 5, 10, 9] SCREAMING_SNAKE_CASE__ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] SCREAMING_SNAKE_CASE__ = [2, 4, 3, 1] SCREAMING_SNAKE_CASE__ = [2, 6, 3, 1, 4, 8, 5, 7] SCREAMING_SNAKE_CASE__ = [4, 1, 3, 5, 7, 2, 8, 6] SCREAMING_SNAKE_CASE__ = [4, 1, 2, 3, 2, 3, 4, 1] SCREAMING_SNAKE_CASE__ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] SCREAMING_SNAKE_CASE__ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation SCREAMING_SNAKE_CASE__ = apply_table(key, paa_table) SCREAMING_SNAKE_CASE__ = temp[:5] SCREAMING_SNAKE_CASE__ = temp[5:] SCREAMING_SNAKE_CASE__ = left_shift(left) SCREAMING_SNAKE_CASE__ = left_shift(right) SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table) SCREAMING_SNAKE_CASE__ = left_shift(left) SCREAMING_SNAKE_CASE__ = left_shift(right) SCREAMING_SNAKE_CASE__ = left_shift(left) SCREAMING_SNAKE_CASE__ = left_shift(right) SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table) # encryption SCREAMING_SNAKE_CASE__ = apply_table(message, IP) SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4] SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv) print('''Cipher text is:''', CT) # decryption SCREAMING_SNAKE_CASE__ = apply_table(CT, IP) SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4] SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv) print('''Plain text after decrypting is:''', PT)
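# A tiny worked example (separate from the S-DES driver above) of the two primitive
# helpers: `apply_table` reorders bits by a 1-indexed permutation table, and `xor`
# combines equal-length bit strings. The bit patterns below are arbitrary illustrations.
def apply_table(inp: str, table: list) -> str:
    return "".join(inp[i - 1] for i in table)


def xor(a: str, b: str) -> str:
    return "".join("0" if x == y else "1" for x, y in zip(a, b))


assert apply_table("1011", [2, 4, 3, 1]) == "0111"  # bit 2, bit 4, bit 3, bit 1
assert xor("1010", "0110") == "1100"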
'''simple docstring''' import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) lowerCAmelCase : Dict = logging.getLogger(__name__) def _A ( A ,A ) -> List[str]: lowercase : Any = np.argmax(lowerCamelCase_ ,axis=1 ) return np.sum(outputs == labels ) def _A ( A ) -> Tuple: with open(lowerCamelCase_ ,encoding="utf_8" ) as f: lowercase : Optional[Any] = csv.reader(lowerCamelCase_ ) lowercase : Optional[int] = [] next(lowerCamelCase_ ) # skip the first line for line in tqdm(lowerCamelCase_ ): output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def _A ( A ,A ,A ,A ,A ,A ) -> Tuple: lowercase : int = [] for dataset in encoded_datasets: lowercase : List[Any] = len(lowerCamelCase_ ) lowercase : Optional[Any] = np.zeros((n_batch, 2, input_len) ,dtype=np.intaa ) lowercase : Optional[int] = np.zeros((n_batch, 2) ,dtype=np.intaa ) lowercase : str = np.full((n_batch, 2, input_len) ,fill_value=-1_0_0 ,dtype=np.intaa ) lowercase : List[Any] = np.zeros((n_batch,) ,dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(lowerCamelCase_ ): lowercase : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowercase : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowercase : List[str] = with_conta lowercase : Optional[Any] = with_conta lowercase : Any = len(lowerCamelCase_ ) - 1 lowercase : Any = len(lowerCamelCase_ ) - 1 lowercase : Optional[int] = with_conta lowercase : Tuple = with_conta lowercase : Optional[int] = mc_label lowercase : Union[str, Any] = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(lowerCamelCase_ ) for t in all_inputs ) ) return tensor_datasets def _A ( ) -> Dict: lowercase : List[str] = argparse.ArgumentParser() parser.add_argument("--model_name" ,type=lowerCamelCase_ ,default="openai-gpt" ,help="pretrained model name" ) parser.add_argument("--do_train" ,action="store_true" ,help="Whether to run training." ) parser.add_argument("--do_eval" ,action="store_true" ,help="Whether to run eval on the dev set." ) parser.add_argument( "--output_dir" ,default=lowerCamelCase_ ,type=lowerCamelCase_ ,required=lowerCamelCase_ ,help="The output directory where the model predictions and checkpoints will be written." ,) parser.add_argument("--train_dataset" ,type=lowerCamelCase_ ,default="" ) parser.add_argument("--eval_dataset" ,type=lowerCamelCase_ ,default="" ) parser.add_argument("--seed" ,type=lowerCamelCase_ ,default=4_2 ) parser.add_argument("--num_train_epochs" ,type=lowerCamelCase_ ,default=3 ) parser.add_argument("--train_batch_size" ,type=lowerCamelCase_ ,default=8 ) parser.add_argument("--eval_batch_size" ,type=lowerCamelCase_ ,default=1_6 ) parser.add_argument("--adam_epsilon" ,default=1e-8 ,type=lowerCamelCase_ ,help="Epsilon for Adam optimizer." ) parser.add_argument("--max_grad_norm" ,type=lowerCamelCase_ ,default=1 ) parser.add_argument( "--max_steps" ,default=-1 ,type=lowerCamelCase_ ,help=( "If > 0: set total number of training steps to perform. 
Override num_train_epochs." ) ,) parser.add_argument( "--gradient_accumulation_steps" ,type=lowerCamelCase_ ,default=1 ,help="Number of update steps to accumulate before performing a backward/update pass." ,) parser.add_argument("--learning_rate" ,type=lowerCamelCase_ ,default=6.25e-5 ) parser.add_argument("--warmup_steps" ,default=0 ,type=lowerCamelCase_ ,help="Linear warmup over warmup_steps." ) parser.add_argument("--lr_schedule" ,type=lowerCamelCase_ ,default="warmup_linear" ) parser.add_argument("--weight_decay" ,type=lowerCamelCase_ ,default=0.01 ) parser.add_argument("--lm_coef" ,type=lowerCamelCase_ ,default=0.9 ) parser.add_argument("--n_valid" ,type=lowerCamelCase_ ,default=3_7_4 ) parser.add_argument("--server_ip" ,type=lowerCamelCase_ ,default="" ,help="Can be used for distant debugging." ) parser.add_argument("--server_port" ,type=lowerCamelCase_ ,default="" ,help="Can be used for distant debugging." ) lowercase : int = parser.parse_args() print(lowerCamelCase_ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) ,redirect_output=lowerCamelCase_ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) lowercase : Any = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) lowercase : Union[str, Any] = torch.cuda.device_count() logger.info("device: {}, n_gpu {}".format(lowerCamelCase_ ,lowerCamelCase_ ) ) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True." ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading function also adds new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset lowercase : int = ['_start_', '_delimiter_', '_classify_'] lowercase : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(lowerCamelCase_ ) lowercase : Optional[int] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) lowercase : int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(lowerCamelCase_ ) ) model.to(lowerCamelCase_ ) # Load and encode the datasets def tokenize_and_encode(A ): if isinstance(lowerCamelCase_ ,lowerCamelCase_ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCamelCase_ ) ) elif isinstance(lowerCamelCase_ ,lowerCamelCase_ ): return obj return [tokenize_and_encode(lowerCamelCase_ ) for o in obj] logger.info("Encoding dataset..."
) lowercase : Tuple = load_rocstories_dataset(args.train_dataset ) lowercase : List[Any] = load_rocstories_dataset(args.eval_dataset ) lowercase : str = (train_dataset, eval_dataset) lowercase : str = tokenize_and_encode(lowerCamelCase_ ) # Compute the max input length for the Transformer lowercase : Any = model.config.n_positions // 2 - 2 lowercase : Dict = max( len(story[:max_length] ) + max(len(conta[:max_length] ) ,len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) lowercase : Dict = min(lowerCamelCase_ ,model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders lowercase : Dict = pre_process_datasets(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,*lowerCamelCase_ ) lowercase : str = tensor_datasets[0], tensor_datasets[1] lowercase : str = TensorDataset(*lowerCamelCase_ ) lowercase : int = RandomSampler(lowerCamelCase_ ) lowercase : str = DataLoader(lowerCamelCase_ ,sampler=lowerCamelCase_ ,batch_size=args.train_batch_size ) lowercase : List[Any] = TensorDataset(*lowerCamelCase_ ) lowercase : str = SequentialSampler(lowerCamelCase_ ) lowercase : Dict = DataLoader(lowerCamelCase_ ,sampler=lowerCamelCase_ ,batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: lowercase : Optional[Any] = args.max_steps lowercase : int = args.max_steps // (len(lowerCamelCase_ ) // args.gradient_accumulation_steps) + 1 else: lowercase : List[Any] = len(lowerCamelCase_ ) // args.gradient_accumulation_steps * args.num_train_epochs lowercase : Any = list(model.named_parameters() ) lowercase : Optional[int] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] lowercase : int = [ { 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], 'weight_decay': args.weight_decay, }, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0}, ] lowercase : List[str] = AdamW(lowerCamelCase_ ,lr=args.learning_rate ,eps=args.adam_epsilon ) lowercase : Optional[Any] = get_linear_schedule_with_warmup( lowerCamelCase_ ,num_warmup_steps=args.warmup_steps ,num_training_steps=lowerCamelCase_ ) if args.do_train: lowercase : List[Any] = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) ,desc="Epoch" ): lowercase : Union[str, Any] = 0 lowercase : int = 0 lowercase : str = tqdm(lowerCamelCase_ ,desc="Training" ) for step, batch in enumerate(lowerCamelCase_ ): lowercase : int = tuple(t.to(lowerCamelCase_ ) for t in batch ) lowercase : Any = batch lowercase : List[Any] = model(lowerCamelCase_ ,mc_token_ids=lowerCamelCase_ ,lm_labels=lowerCamelCase_ ,mc_labels=lowerCamelCase_ ) lowercase : Union[str, Any] = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() lowercase : Optional[int] = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 lowercase : Optional[Any] = 'Training loss: {:.2e} lr: {:.2e}'.format(lowerCamelCase_ ,scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer lowercase : List[str] = model.module if hasattr(lowerCamelCase_ ,"module" ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` lowercase : Union[str, Any] = os.path.join(args.output_dir ,lowerCamelCase_ ) lowercase : List[Any] = os.path.join(args.output_dir ,lowerCamelCase_ 
) torch.save(model_to_save.state_dict() ,lowerCamelCase_ ) model_to_save.config.to_json_file(lowerCamelCase_ ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned lowercase : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) lowercase : Tuple = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(lowerCamelCase_ ) if args.do_eval: model.eval() lowercase : Union[str, Any] = 0, 0 lowercase : List[Any] = 0, 0 for batch in tqdm(lowerCamelCase_ ,desc="Evaluating" ): lowercase : int = tuple(t.to(lowerCamelCase_ ) for t in batch ) lowercase : Dict = batch with torch.no_grad(): lowercase : Dict = model( lowerCamelCase_ ,mc_token_ids=lowerCamelCase_ ,lm_labels=lowerCamelCase_ ,mc_labels=lowerCamelCase_ ) lowercase : int = mc_logits.detach().cpu().numpy() lowercase : List[str] = mc_labels.to("cpu" ).numpy() lowercase : str = accuracy(lowerCamelCase_ ,lowerCamelCase_ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 lowercase : Optional[int] = eval_loss / nb_eval_steps lowercase : Dict = eval_accuracy / nb_eval_examples lowercase : Dict = tr_loss / nb_tr_steps if args.do_train else None lowercase : Dict = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} lowercase : str = os.path.join(args.output_dir ,"eval_results.txt" ) with open(lowerCamelCase_ ,"w" ) as writer: logger.info("***** Eval results *****" ) for key in sorted(result.keys() ): logger.info(" %s = %s" ,lowerCamelCase_ ,str(result[key] ) ) writer.write("%s = %s\n" % (key, str(result[key] )) ) if __name__ == "__main__": main()
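# A framework-free sketch of the learning-rate shape that
# `get_linear_schedule_with_warmup` produces: the multiplier rises linearly from 0 over
# `num_warmup_steps`, then decays linearly to 0 at `num_training_steps`. The function
# below is our illustration of that shape, not the transformers implementation itself.
def linear_schedule_with_warmup(step, num_warmup_steps, num_training_steps):
    if step < num_warmup_steps:
        return step / max(1, num_warmup_steps)
    return max(0.0, (num_training_steps - step) / max(1, num_training_steps - num_warmup_steps))


assert linear_schedule_with_warmup(5, 10, 100) == 0.5   # halfway through warmup
assert linear_schedule_with_warmup(55, 10, 100) == 0.5  # halfway through the decay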
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class _UpperCamelCase( unittest.TestCase ): def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Any ): '''simple docstring''' __a : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(SCREAMING_SNAKE_CASE__ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' __a : int = None ops.enable_eager_execution_internal() __a : Optional[Any] = tf.config.list_physical_devices('CPU' ) if len(SCREAMING_SNAKE_CASE__ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) __a : int = tf.config.list_logical_devices(device_type='CPU' ) __a : str = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): __a : List[str] = GradientAccumulator() __a : Tuple = tf.Variable([4.0, 3.0] ) __a , __a : int = create_optimizer(5e-5 , 1_0 , 5 ) __a : List[Any] = tf.Variable([0.0, 0.0] , trainable=SCREAMING_SNAKE_CASE__ ) def accumulate_on_replica(SCREAMING_SNAKE_CASE__ : Optional[Any] ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ): with strategy.scope(): __a : Optional[Any] = strategy.experimental_local_results(SCREAMING_SNAKE_CASE__ ) local_variables[0].assign(SCREAMING_SNAKE_CASE__ ) local_variables[1].assign(SCREAMING_SNAKE_CASE__ ) strategy.run(SCREAMING_SNAKE_CASE__ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(SCREAMING_SNAKE_CASE__ ) def _check_local_values(SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ): __a : Union[str, Any] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() 
self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
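# A toy, framework-free model of the accumulation semantics the test above exercises:
# gradients from successive calls are summed, a step counter is kept, and `reset`
# zeroes both. `ToyAccumulator` is our illustration, not the transformers GradientAccumulator.
class ToyAccumulator:
    def __init__(self):
        self.step = 0
        self.gradients = None

    def __call__(self, grads):
        if self.gradients is None:
            self.gradients = [0.0] * len(grads)
        if len(grads) != len(self.gradients):
            # Mirrors the error the real accumulator raises when the gradient count changes.
            raise ValueError("expected the same number of gradients on every call")
        self.gradients = [acc + g for acc, g in zip(self.gradients, grads)]
        self.step += 1

    def reset(self):
        self.step = 0
        self.gradients = [0.0] * len(self.gradients)


acc = ToyAccumulator()
for grads in ([1.0, 2.0], [-2.0, 1.0], [-1.0, 2.0]):
    acc(grads)
assert acc.step == 3 and acc.gradients == [-2.0, 5.0]  # same totals the test checks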
"""simple docstring""" import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class _SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=64 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , ) -> List[str]: lowercase__ : Tuple = parent lowercase__ : str = batch_size lowercase__ : int = seq_length lowercase__ : Union[str, Any] = is_training lowercase__ : str = use_input_mask lowercase__ : Dict = use_token_type_ids lowercase__ : int = use_labels lowercase__ : Tuple = vocab_size lowercase__ : Union[str, Any] = hidden_size lowercase__ : Optional[Any] = num_hidden_layers lowercase__ : List[str] = num_attention_heads lowercase__ : List[str] = intermediate_size lowercase__ : Dict = hidden_act lowercase__ : str = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : List[str] = max_position_embeddings lowercase__ : List[Any] = type_vocab_size lowercase__ : List[Any] = type_sequence_label_size lowercase__ : str = initializer_range lowercase__ : Optional[Any] = num_labels lowercase__ : List[str] = num_choices lowercase__ : Tuple = scope lowercase__ : int = vocab_size - 1 def UpperCAmelCase__( self ) -> Dict: lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Optional[Any] = None if self.use_input_mask: lowercase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple = None if self.use_labels: lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Dict = self.get_config() return config, input_ids, input_mask, token_labels def UpperCAmelCase__( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def UpperCAmelCase__( self ) -> Optional[Any]: lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ : str = True return config, input_ids, input_mask, token_labels def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) 
-> List[Any]: lowercase__ : Tuple = GPTNeoXModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase__ : Dict = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ) lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: lowercase__ : int = True lowercase__ : str = GPTNeoXModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict: lowercase__ : str = GPTNeoXForCausalLM(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: lowercase__ : Any = self.num_labels lowercase__ : int = GPTNeoXForQuestionAnswering(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: lowercase__ : Dict = self.num_labels lowercase__ : List[str] = GPTNeoXForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: lowercase__ : Dict = self.num_labels lowercase__ : Dict = GPTNeoXForTokenClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase__ : str = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: lowercase__ : Optional[Any] = True lowercase__ : str = GPTNeoXForCausalLM(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() # first forward pass lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ ) lowercase__ : Tuple = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase__ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__ : 
List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowercase__ : str = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase__ : Dict = torch.cat([input_mask, next_mask] , dim=-1 ) lowercase__ : int = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ ) lowercase__ : Dict = output_from_no_past['hidden_states'][0] lowercase__ : List[str] = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , )['hidden_states'][0] # select random slice lowercase__ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase__ : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) ) def UpperCAmelCase__( self ) -> Optional[int]: lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() lowercase__ : str = config_and_inputs lowercase__ : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): """simple docstring""" _a : Optional[int] = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) _a : str = (GPTNeoXForCausalLM,) if is_torch_available() else () _a : Optional[int] = ( { '''feature-extraction''': GPTNeoXModel, '''question-answering''': GPTNeoXForQuestionAnswering, '''text-classification''': GPTNeoXForSequenceClassification, '''text-generation''': GPTNeoXForCausalLM, '''token-classification''': GPTNeoXForTokenClassification, '''zero-shot''': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) _a : Union[str, Any] = False _a : Optional[int] = False _a : str = False _a : Any = False def UpperCAmelCase__( self ) -> str: lowercase__ : str = GPTNeoXModelTester(self ) lowercase__ : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=64 , num_attention_heads=8 ) def UpperCAmelCase__( self ) -> int: self.config_tester.run_common_tests() def UpperCAmelCase__( self ) -> List[Any]: lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self ) -> List[str]: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self ) -> List[str]: lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase__ : int = None self.model_tester.create_and_check_model_as_decoder(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self ) -> List[str]: lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def 
UpperCAmelCase__( self ) -> Optional[int]: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self ) -> Optional[Any]: lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self ) -> List[str]: lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self ) -> List[str]: lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def UpperCAmelCase__( self ) -> Optional[Any]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[Any]: lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int = ids_tensor([1, 10] , config.vocab_size ) lowercase__ : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase__ : List[str] = GPTNeoXModel(SCREAMING_SNAKE_CASE__ ) original_model.to(SCREAMING_SNAKE_CASE__ ) original_model.eval() lowercase__ : Union[str, Any] = original_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state lowercase__ : int = original_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowercase__ : str = {'type': scaling_type, 'factor': 10.0} lowercase__ : Any = GPTNeoXModel(SCREAMING_SNAKE_CASE__ ) scaled_model.to(SCREAMING_SNAKE_CASE__ ) scaled_model.eval() lowercase__ : Optional[int] = scaled_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state lowercase__ : Union[str, Any] = scaled_model(SCREAMING_SNAKE_CASE__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1E-5 ) ) @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__( self ) -> Any: lowercase__ : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: lowercase__ : str = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(SCREAMING_SNAKE_CASE__ ) lowercase__ : Dict = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. 
# See: https://github.com/huggingface/transformers/pull/24193 lowercase__ : Optional[int] = 'My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure' lowercase__ : Tuple = model.generate(**SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ , max_new_tokens=20 ) lowercase__ : int = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )[0] self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
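# A hedged usage sketch of the generation path the slow test above exercises.
# The checkpoint id matches the test; running this downloads the weights.
from transformers import AutoTokenizer, GPTNeoXForCausalLM

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

inputs = tokenizer("My favorite food is", return_tensors="pt")
output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
print(tokenizer.batch_decode(output_ids)[0])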
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''', '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''', } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = '''roberta''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_0_2_6_5 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=1e-12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Tuple="absolute" , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Any , ): '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = vocab_size __a : Tuple = hidden_size __a : List[str] = num_hidden_layers __a : List[Any] = num_attention_heads __a : str = hidden_act __a : Optional[Any] = intermediate_size __a : Dict = hidden_dropout_prob __a : List[str] = attention_probs_dropout_prob __a : Optional[Any] = max_position_embeddings __a : Dict = type_vocab_size __a : str = initializer_range __a : List[str] = layer_norm_eps __a : Optional[int] = position_embedding_type __a : Union[str, Any] = use_cache __a : str = classifier_dropout class _UpperCamelCase( __lowerCamelCase ): @property def __lowerCAmelCase ( self : Any ): '''simple docstring''' if self.task == "multiple-choice": __a : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __a : Dict = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
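# A brief usage sketch for the config above: default construction yields
# roberta-base-like hyperparameters, and the config drives model instantiation.
from transformers import RobertaConfig, RobertaModel

config = RobertaConfig()      # roberta-base-like defaults
model = RobertaModel(config)  # randomly initialized weights
assert model.config.hidden_size == 768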
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number: a positive integer whose only prime factors are 2, 3 and 5."""
    ugly_nums = [1]
    ia, ib, ic = 0, 0, 0  # indices of the next numbers to multiply by 2, 3 and 5
    next_2 = ugly_nums[ia] * 2
    next_3 = ugly_nums[ib] * 3
    next_5 = ugly_nums[ic] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            ia += 1
            next_2 = ugly_nums[ia] * 2
        if next_num == next_3:
            ib += 1
            next_3 = ugly_nums[ib] * 3
        if next_num == next_5:
            ic += 1
            next_5 = ugly_nums[ic] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
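# An alternative sketch using a min-heap and a seen-set; it yields the same sequence
# as the three-pointer method above and is a common variant of this algorithm.
import heapq


def ugly_numbers_heap(n: int) -> int:
    heap, seen = [1], {1}
    value = 1
    for _ in range(n):
        value = heapq.heappop(heap)
        for factor in (2, 3, 5):
            candidate = value * factor
            if candidate not in seen:
                seen.add(candidate)
                heapq.heappush(heap, candidate)
    return value


assert ugly_numbers_heap(10) == 12  # sequence: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12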
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = '''▁''' SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.bpe.model'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } SCREAMING_SNAKE_CASE__ = { '''facebook/xglm-564M''': 2048, } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ): '''simple docstring''' __a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer __a : Any = 7 __a : Union[str, Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )] __a : Union[str, Any] = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , ) __a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) ) __a : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __a : Any = 1 # Mimic fairseq token-to-id alignment for the first 4 token __a : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} __a : List[str] = len(self.sp_model ) __a : Optional[int] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ ) __a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : List[str] ): '''simple docstring''' __a : Tuple = self.__dict__.copy() __a : List[str] = None __a : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' __a : int = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __a : Dict = {} __a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a __a : Optional[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ): '''simple docstring''' __a : Optional[int] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' __a : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __a : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' __a : 
Optional[int] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip() return out_string def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __a : Any = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi: __a : List[Any] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
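# A hedged usage sketch for the tokenizer above; the checkpoint id comes from the
# pretrained map at the top of the file and is downloaded on first use.
from transformers import XGLMTokenizer

tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
ids = tokenizer("Hello world").input_ids
print(tokenizer.convert_ids_to_tokens(ids))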
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Any: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> int: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Any: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> str: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , 
**UpperCamelCase_ ) -> List[str]: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Tuple: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> str: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> int: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> int: requires_backends(cls , ['''flax'''] ) class UpperCAmelCase_ ( metaclass=__lowerCamelCase ): UpperCamelCase =['''flax'''] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Tuple: requires_backends(cls , ['''flax'''] )
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] SCREAMING_SNAKE_CASE__ = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] ): __a : str = torch.load(lowerCamelCase_ , map_location='cpu' ) return sd def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Dict=rename_keys_prefix ): __a : Optional[Any] = OrderedDict() __a : Any = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __a : List[Any] = key for name_pair in rename_keys_prefix: __a : List[str] = new_key.replace(name_pair[0] , name_pair[1] ) __a : Any = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __a : int = new_d['cls.predictions.bias'] return new_d @torch.no_grad() def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : Any ): assert ( checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: __a : Dict = 'pretraining' if "vcr" in checkpoint_path: __a : int = {'visual_embedding_dim': 5_1_2} elif "vqa_advanced" in checkpoint_path: __a : int = {'visual_embedding_dim': 2_0_4_8} elif "vqa" in checkpoint_path: __a : Tuple = {'visual_embedding_dim': 2_0_4_8} elif "nlvr" in checkpoint_path: __a : List[Any] = {'visual_embedding_dim': 1_0_2_4} else: raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: __a : int = {'visual_embedding_dim': 5_1_2} __a : Any = 'multichoice' elif "vqa_advanced" in checkpoint_path: __a : Any = {'visual_embedding_dim': 2_0_4_8} __a : List[str] = 'vqa_advanced' elif "vqa" in checkpoint_path: __a : List[Any] = {'visual_embedding_dim': 2_0_4_8, 'num_labels': 3_1_2_9} __a : List[Any] = 'vqa' elif "nlvr" in checkpoint_path: __a : Optional[int] = { 'visual_embedding_dim': 1_0_2_4, 'num_labels': 2, } __a : Optional[Any] = 'nlvr' __a : str = VisualBertConfig(**lowerCamelCase_ ) # Load State Dict __a : str = load_state_dict(lowerCamelCase_ ) __a : str = get_new_dict(lowerCamelCase_ , lowerCamelCase_ ) if model_type == "pretraining": __a : Optional[Any] = VisualBertForPreTraining(lowerCamelCase_ ) elif model_type == "vqa": __a : Any = VisualBertForQuestionAnswering(lowerCamelCase_ ) elif model_type == "nlvr": __a : int = VisualBertForVisualReasoning(lowerCamelCase_ ) elif model_type == "multichoice": __a : Optional[int] = VisualBertForMultipleChoice(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # Save Checkpoints Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ ) model.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
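# Toy illustration of the prefix renaming performed by the conversion above:
# each (old, new) pair in rename_keys_prefix is applied to every key in order
# (toy keys and values, not a real checkpoint).
from collections import OrderedDict

rename_pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]
old_state_dict = OrderedDict(
    [("bert.bert.embeddings.weight", 0), ("bert.cls.predictions.bias", 1)]
)

new_state_dict = OrderedDict()
for key, value in old_state_dict.items():
    new_key = key
    for old, new in rename_pairs:
        new_key = new_key.replace(old, new)
    new_state_dict[new_key] = value

print(list(new_state_dict))  # ['visual_bert.embeddings.weight', 'cls.predictions.bias']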
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''adapter_layer''': '''encoder.layers.*.adapter_layer''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', '''pooling_layer.linear''': '''projector''', '''pooling_layer.projection''': '''classifier''', } __lowerCAmelCase : Any = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''projector''', '''classifier''', ] def __lowerCAmelCase ( __UpperCamelCase : Tuple ): '''simple docstring''' snake_case_ : str = {} with open(lowerCamelCase_ , """r""" ) as file: for line_number, line in enumerate(lowerCamelCase_ ): snake_case_ : Union[str, Any] = line.strip() if line: snake_case_ : List[Any] = line.split() snake_case_ : Union[str, Any] = line_number snake_case_ : Union[str, Any] = words[0] snake_case_ : Optional[int] = value return result def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[int] ): '''simple docstring''' for attribute in key.split(""".""" ): snake_case_ : Tuple = getattr(lowerCamelCase_ , lowerCamelCase_ ) snake_case_ : Optional[int] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowerCamelCase_ ): snake_case_ : str = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ : List[str] = 'param' if weight_type is not None and weight_type != "param": snake_case_ : Optional[int] = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape elif weight_type is not None and weight_type == "param": snake_case_ : List[Any] = hf_pointer for attribute in hf_param_name.split(""".""" ): snake_case_ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_ ) snake_case_ : Tuple = shape_pointer.shape # let's reduce dimension snake_case_ : List[Any] = value[0] else: snake_case_ : Dict = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": snake_case_ : Union[str, Any] = value elif weight_type == "weight_g": snake_case_ : Optional[Any] = value elif weight_type == "weight_v": snake_case_ : Any = value elif weight_type == "bias": snake_case_ : List[Any] = value elif weight_type == "param": for attribute in hf_param_name.split(""".""" ): snake_case_ : Any = getattr(lowerCamelCase_ , lowerCamelCase_ ) snake_case_ : Union[str, Any] = value else: snake_case_ : Dict = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Tuple ): '''simple docstring''' snake_case_ : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowerCamelCase_ ): snake_case_ : Optional[int] = PARAM_MAPPING[full_name.split(""".""" )[-1]] snake_case_ : Optional[int] = 'param' if weight_type is not None and weight_type != "param": snake_case_ : Dict = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": snake_case_ : List[str] = '.'.join([key, hf_param_name] ) else: snake_case_ : Optional[int] = key snake_case_ : Tuple = value if 'lm_head' in full_key else value[0] __lowerCAmelCase : str = { '''W_a''': '''linear_1.weight''', '''W_b''': '''linear_2.weight''', '''b_a''': '''linear_1.bias''', '''b_b''': '''linear_2.bias''', '''ln_W''': '''norm.weight''', '''ln_b''': '''norm.bias''', } def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : str=None ): '''simple docstring''' snake_case_ : str = False for key, mapped_key in MAPPING.items(): snake_case_ : int = 'wav2vec2.' 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: snake_case_ : str = True if "*" in mapped_key: snake_case_ : List[Any] = name.split(lowerCamelCase_ )[0].split(""".""" )[-2] snake_case_ : List[Any] = mapped_key.replace("""*""" , lowerCamelCase_ ) if "weight_g" in name: snake_case_ : Union[str, Any] = 'weight_g' elif "weight_v" in name: snake_case_ : str = 'weight_v' elif "bias" in name: snake_case_ : Any = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case_ : Optional[int] = 'weight' else: snake_case_ : Tuple = None if hf_dict is not None: rename_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) return is_used return is_used def __lowerCAmelCase ( __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : str ): '''simple docstring''' snake_case_ : Tuple = [] snake_case_ : Dict = fairseq_model.state_dict() snake_case_ : List[str] = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): snake_case_ : Optional[int] = False if "conv_layers" in name: load_conv_layer( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == """group""" , ) snake_case_ : int = True else: snake_case_ : Tuple = load_wavaveca_layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) if not is_used: unused_weights.append(lowerCamelCase_ ) logger.warning(F'Unused weights: {unused_weights}' ) def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] ): '''simple docstring''' snake_case_ : List[Any] = full_name.split("""conv_layers.""" )[-1] snake_case_ : List[str] = name.split(""".""" ) snake_case_ : int = int(items[0] ) snake_case_ : str = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) snake_case_ : str = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) snake_case_ : Optional[int] = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' ) snake_case_ : Union[str, Any] = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' 
) snake_case_ : Any = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(lowerCamelCase_ ) @torch.no_grad() def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : int=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=False ): '''simple docstring''' if config_path is not None: snake_case_ : Union[str, Any] = WavaVecaConfig.from_pretrained(lowerCamelCase_ ) else: snake_case_ : Tuple = WavaVecaConfig() if is_seq_class: snake_case_ : int = read_txt_into_dict(lowerCamelCase_ ) snake_case_ : Any = idalabel snake_case_ : Optional[Any] = WavaVecaForSequenceClassification(lowerCamelCase_ ) snake_case_ : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) feature_extractor.save_pretrained(lowerCamelCase_ ) elif is_finetuned: if dict_path: snake_case_ : List[str] = Dictionary.load(lowerCamelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case_ : List[str] = target_dict.pad_index snake_case_ : Any = target_dict.bos_index snake_case_ : Dict = target_dict.eos_index snake_case_ : List[str] = len(target_dict.symbols ) snake_case_ : Optional[Any] = os.path.join(lowerCamelCase_ , """vocab.json""" ) if not os.path.isdir(lowerCamelCase_ ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase_ ) ) return os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) snake_case_ : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched snake_case_ : Optional[Any] = 0 snake_case_ : Union[str, Any] = 1 with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(lowerCamelCase_ , lowerCamelCase_ ) snake_case_ : str = WavaVecaCTCTokenizer( lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase_ , ) snake_case_ : Any = True if config.feat_extract_norm == 'layer' else False snake_case_ : Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) snake_case_ : Tuple = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ ) processor.save_pretrained(lowerCamelCase_ ) snake_case_ : List[Any] = WavaVecaForCTC(lowerCamelCase_ ) else: snake_case_ : List[str] = WavaVecaForPreTraining(lowerCamelCase_ ) if is_finetuned or is_seq_class: snake_case_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: snake_case_ : int = argparse.Namespace(task="""audio_pretraining""" ) snake_case_ : Optional[int] = fairseq.tasks.setup_task(lowerCamelCase_ ) snake_case_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase_ ) snake_case_ : str = model[0].eval() recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": __lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', 
default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) parser.add_argument( '''--is_seq_class''', action='''store_true''', help='''Whether the model to convert is a fine-tuned sequence classification model or not''', ) __lowerCAmelCase : Any = parser.parse_args() __lowerCAmelCase : str = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
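# Sketch of the dotted-key getattr walk used by the conversion above: a key
# such as "encoder.layers.0.attention.k_proj" is resolved one attribute at a
# time, and the weight is written through the final pointer.
import torch
import torch.nn as nn

model = nn.Module()
model.proj = nn.Linear(2, 2)

pointer = model
for attribute in "proj.weight".split("."):
    pointer = getattr(pointer, attribute)

pointer.data = torch.zeros(2, 2)  # write through the resolved pointer
print(model.proj.weight.sum())  # tensor(0., grad_fn=<SumBackward0>)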
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
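# Unrolled version of the quine above: %r substitutes the repr of the template
# into itself and %% collapses to a single %, so the printed text reproduces
# the source line exactly.
template = 'print((lambda quine: quine %% quine)(%r))'
print(template % template)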
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def _UpperCamelCase ( UpperCamelCase__ ): """simple docstring""" if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False def _UpperCamelCase ( UpperCamelCase__ ): """simple docstring""" for char in word: __magic_name__ : Optional[int] = ord(lowerCamelCase_ ) if not _is_chinese_char(lowerCamelCase_ ): return 0 return 1 def _UpperCamelCase ( UpperCamelCase__ ): """simple docstring""" __magic_name__ : Union[str, Any] = set() for token in tokens: __magic_name__ : Any = len(lowerCamelCase_ ) > 1 and is_chinese(lowerCamelCase_ ) if chinese_word: word_set.add(lowerCamelCase_ ) __magic_name__ : str = list(lowerCamelCase_ ) return word_list def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" if not chinese_word_set: return bert_tokens __magic_name__ : Tuple = max([len(lowerCamelCase_ ) for w in chinese_word_set] ) __magic_name__ : List[str] = bert_tokens __magic_name__ : Tuple = 0, len(lowerCamelCase_ ) while start < end: __magic_name__ : List[str] = True if is_chinese(bert_word[start] ): __magic_name__ : List[str] = min(end - start , lowerCamelCase_ ) for i in range(lowerCamelCase_ , 1 , -1 ): __magic_name__ : Optional[int] = ''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): __magic_name__ : Optional[Any] = '##' + bert_word[j] __magic_name__ : List[Any] = start + i __magic_name__ : Tuple = False break if single_word: start += 1 return bert_word def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" __magic_name__ : Union[str, Any] = [] for i in range(0 , len(lowerCamelCase_ ) , 100 ): __magic_name__ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws __magic_name__ : Optional[int] = [get_chinese_word(lowerCamelCase_ ) for r in res] ltp_res.extend(lowerCamelCase_ ) assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) __magic_name__ : Optional[Any] = [] for i in range(0 , len(lowerCamelCase_ ) , 100 ): __magic_name__ : Union[str, Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) __magic_name__ : Union[str, Any] = [] for input_ids, chinese_word in zip(lowerCamelCase_ , lowerCamelCase_ ): __magic_name__ : str = [] for id in input_ids: __magic_name__ : Optional[int] = bert_tokenizer._convert_id_to_token(lowerCamelCase_ ) input_tokens.append(lowerCamelCase_ ) __magic_name__ : int = add_sub_symbol(lowerCamelCase_ , lowerCamelCase_ ) __magic_name__ : List[Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(lowerCamelCase_ ): if token[:2] == "##": __magic_name__ : Dict = token[2:] # save chinese tokens' pos if len(lowerCamelCase_ ) == 1 and _is_chinese_char(ord(lowerCamelCase_ ) ): ref_id.append(lowerCamelCase_ ) ref_ids.append(lowerCamelCase_ ) assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) return ref_ids def _UpperCamelCase ( UpperCamelCase__ ): """simple docstring""" with open(args.file_name , "r" , encoding="utf-8" ) as f: __magic_name__ : int = f.readlines() __magic_name__ : Optional[Any] = [line.strip() for line in data if len(lowerCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' __magic_name__ : int = LTP(args.ltp ) # faster in GPU device __magic_name__ : Dict = BertTokenizer.from_pretrained(args.bert ) __magic_name__ : Union[str, Any] = prepare_ref(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) with open(args.save_path , "w" , encoding="utf-8" ) as f: __magic_name__ : Union[str, Any] = [json.dumps(lowerCamelCase_ ) + '\n' for ref in ref_ids] f.writelines(lowerCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) _SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args() main(args)
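# Toy example of the "##" marking performed by add_sub_symbol above
# (hypothetical inputs): characters that continue a known multi-character
# word get a "##" prefix, so whole-word masking can later find them via
# token[:2] == "##".
bert_tokens = ["我", "喜", "欢", "猫"]
chinese_words = {"喜欢"}

marked = list(bert_tokens)
i = 0
while i < len(marked):
    matched = False
    for j in range(len(marked), i + 1, -1):  # try longest spans first
        if "".join(bert_tokens[i:j]) in chinese_words:
            for k in range(i + 1, j):
                marked[k] = "##" + marked[k]
            i = j
            matched = True
            break
    if not matched:
        i += 1

print(marked)  # ['我', '喜', '##欢', '猫']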
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class _UpperCamelCase( __lowerCamelCase ): def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' __a : List[Any] = tempfile.mkdtemp() __a : int = 8 # DPR tok __a : Dict = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __a : int = os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok __a : str = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __a : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __a : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __a : List[str] = {'unk_token': '<unk>'} __a : Dict = os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['vocab_file'] ) __a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) ) def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def __lowerCAmelCase ( self : str ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) @require_tokenizers def __lowerCAmelCase ( self : Dict ): '''simple docstring''' __a : Tuple = os.path.join(self.tmpdirname , 'rag_tokenizer' ) __a : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) __a : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(SCREAMING_SNAKE_CASE__ ) rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) __a : List[Any] = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , 
config=SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE__ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE__ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' __a : Optional[Any] = RagTokenizer.from_pretrained('facebook/rag-token-nq' ) __a : List[Any] = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __a : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @slow def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' __a : Any = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' ) __a : Union[str, Any] = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __a : str = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_bridgetower": [
        "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BridgeTowerConfig",
        "BridgeTowerTextConfig",
        "BridgeTowerVisionConfig",
    ],
    "processing_bridgetower": ["BridgeTowerProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bridgetower"] = [
        "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BridgeTowerForContrastiveLearning",
        "BridgeTowerForImageAndTextRetrieval",
        "BridgeTowerForMaskedLM",
        "BridgeTowerModel",
        "BridgeTowerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bridgetower import (
        BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BridgeTowerConfig,
        BridgeTowerTextConfig,
        BridgeTowerVisionConfig,
    )
    from .processing_bridgetower import BridgeTowerProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_bridgetower import BridgeTowerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bridgetower import (
            BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
            BridgeTowerForContrastiveLearning,
            BridgeTowerForImageAndTextRetrieval,
            BridgeTowerForMaskedLM,
            BridgeTowerModel,
            BridgeTowerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
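# What _LazyModule buys: importing the package stays cheap because each
# submodule is only imported when one of its names is first accessed. A
# minimal stdlib-only sketch of the idea (not the library implementation):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every public name to the submodule that defines it.
        self._name_to_module = {
            attr: submodule
            for submodule, attrs in import_structure.items()
            for attr in attrs
        }

    def __getattr__(self, name):
        submodule = self._name_to_module.get(name)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, name)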
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''bert_for_seq_generation''': ( '''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model''' ), } } SCREAMING_SNAKE_CASE__ = {'''bert_for_seq_generation''': 512} class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : List[int] = [] __SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask'''] def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<::::>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ): '''simple docstring''' __a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , ) __a : int = vocab_file __a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return self.sp_model.get_piece_size() def __lowerCAmelCase ( self : int ): '''simple docstring''' __a : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ): '''simple docstring''' __a : Union[str, Any] = self.__dict__.copy() __a : Any = None return state def __setstate__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' __a : str = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __a : str = {} __a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' __a : int = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ ) return token def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' __a : Optional[Any] = [] __a : Optional[int] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token __a : Dict = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE__ ) out_string += 
self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) return out_string.strip() def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __a : Tuple = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi: __a : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
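# Toy version of the decode loop above, with a stand-in for sp_model.decode,
# showing how special tokens are passed through verbatim while ordinary
# sentencepiece pieces are stitched back together.
special_tokens = {"<s>", "</s>", "<unk>"}


def fake_decode(pieces):  # stand-in for sp_model.decode
    return "".join(pieces).replace("\u2581", " ")


tokens = ["\u2581Hello", "\u2581world", "</s>"]
out, current = "", []
for token in tokens:
    if token in special_tokens:
        out += fake_decode(current) + token
        current = []
    else:
        current.append(token)
out += fake_decode(current)
print(out.strip())  # Hello world</s>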
from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar lowerCamelCase : Optional[int] = TypeVar("T") lowerCamelCase : Optional[Any] = TypeVar("U") class A( Generic[T, U] ): '''simple docstring''' def __init__( self : Optional[Any] , A_ : T | None , A_ : U | None ) -> List[str]: """simple docstring""" lowerCamelCase_ = key lowerCamelCase_ = val lowerCamelCase_ = None lowerCamelCase_ = None def __repr__( self : Any ) -> Tuple: """simple docstring""" return ( f"""Node: key: {self.key}, val: {self.val}, """ f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}""" ) class A( Generic[T, U] ): '''simple docstring''' def __init__( self : Tuple ) -> Any: """simple docstring""" lowerCamelCase_ = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ = self.rear, self.head def __repr__( self : List[str] ) -> Dict: """simple docstring""" lowerCamelCase_ = ['DoubleLinkedList'] lowerCamelCase_ = self.head while node.next is not None: rep.append(str(SCREAMING_SNAKE_CASE__ ) ) lowerCamelCase_ = node.next rep.append(str(self.rear ) ) return ",\n ".join(SCREAMING_SNAKE_CASE__ ) def a__ ( self : int , A_ : DoubleLinkedListNode[T, U] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None lowerCamelCase_ = node lowerCamelCase_ = previous lowerCamelCase_ = node lowerCamelCase_ = self.rear def a__ ( self : List[Any] , A_ : DoubleLinkedListNode[T, U] ) -> Union[str, Any]: """simple docstring""" if node.prev is None or node.next is None: return None lowerCamelCase_ = node.next lowerCamelCase_ = node.prev lowerCamelCase_ = None lowerCamelCase_ = None return node class A( Generic[T, U] ): '''simple docstring''' UpperCamelCase = {} def __init__( self : str , A_ : int ) -> List[Any]: """simple docstring""" lowerCamelCase_ = DoubleLinkedList() lowerCamelCase_ = capacity lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = 0 lowerCamelCase_ = {} def __repr__( self : Dict ) -> Union[str, Any]: """simple docstring""" return ( f"""CacheInfo(hits={self.hits}, misses={self.miss}, """ f"""capacity={self.capacity}, current size={self.num_keys})""" ) def __contains__( self : Tuple , A_ : T ) -> Dict: """simple docstring""" return key in self.cache def a__ ( self : Union[str, Any] , A_ : T ) -> Optional[Any]: """simple docstring""" if key in self.cache: self.hits += 1 lowerCamelCase_ = self.cache[key] lowerCamelCase_ = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(SCREAMING_SNAKE_CASE__ ) return node.val self.miss += 1 return None def a__ ( self : Any , A_ : T , A_ : U ) -> Union[str, Any]: """simple docstring""" if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity lowerCamelCase_ = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(SCREAMING_SNAKE_CASE__ ) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 lowerCamelCase_ = DoubleLinkedListNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.list.add(self.cache[key] ) 
self.num_keys += 1 else: # bump node to the end of the list, update value lowerCamelCase_ = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list lowerCamelCase_ = value self.list.add(SCREAMING_SNAKE_CASE__ ) @classmethod def a__ ( cls : Union[str, Any] , A_ : int = 128 ) -> Dict: """simple docstring""" def cache_decorator_inner(A_ : Callable[[T], U] ) -> Callable[..., U]: def cache_decorator_wrapper(*A_ : T ) -> U: if func not in cls.decorator_function_to_instance_map: lowerCamelCase_ = LRUCache(SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: lowerCamelCase_ = func(*SCREAMING_SNAKE_CASE__ ) cls.decorator_function_to_instance_map[func].put(args[0] , SCREAMING_SNAKE_CASE__ ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(SCREAMING_SNAKE_CASE__ , 'cache_info' , SCREAMING_SNAKE_CASE__ ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
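# Hypothetical usage of the cache above, assuming the class and classmethod
# are exposed as LRUCache.decorator (identifiers in this dump are mangled,
# so the names are an assumption):
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


print(fib(59))           # 956722026041, mostly served from the cache
print(fib.cache_info())  # e.g. CacheInfo(hits=..., misses=..., capacity=100, current size=...)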
from ..utils import DummyObject, requires_backends class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : List[Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Any = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Dict = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : int = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , 
*SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[int] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] )
"""simple docstring""" import math def UpperCAmelCase ( a__ = 1_00 ): '''simple docstring''' lowerCAmelCase :Dict = sum(i * i for i in range(1 , n + 1 ) ) lowerCAmelCase :List[Any] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(F"""{solution() = }""")
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
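# Quick spot check of gauss_easter above: Western Easter in 2023 fell on
# April 9, so the call below should print 2023-04-09 00:00:00.
print(gauss_easter(2023))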
'''simple docstring''' import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''): raise Exception('''requires fairseq >= 1.0.0a''') logging.set_verbosity_info() lowerCAmelCase_ : Dict = logging.get_logger(__name__) lowerCAmelCase_ : Any = '''Hello world! cécé herlolip''' def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): _UpperCAmelCase : Union[str, Any] = FairseqRobertaModel.from_pretrained(lowerCamelCase_ ) roberta.eval() # disable dropout _UpperCAmelCase : Optional[int] = roberta.model.encoder.sentence_encoder _UpperCAmelCase : Any = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , ) if classification_head: _UpperCAmelCase : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0] print("""Our RoBERTa config:""" , lowerCamelCase_ ) _UpperCAmelCase : Dict = XLMRobertaXLForSequenceClassification(lowerCamelCase_ ) if classification_head else XLMRobertaXLForMaskedLM(lowerCamelCase_ ) model.eval() # Now let's copy all the weights. # Embeddings _UpperCAmelCase : Optional[int] = roberta_sent_encoder.embed_tokens.weight _UpperCAmelCase : Union[str, Any] = roberta_sent_encoder.embed_positions.weight _UpperCAmelCase : List[Any] = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
_UpperCAmelCase : int = roberta_sent_encoder.layer_norm.weight _UpperCAmelCase : Union[str, Any] = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer _UpperCAmelCase : BertLayer = model.roberta.encoder.layer[i] _UpperCAmelCase : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] _UpperCAmelCase : RobertaAttention = layer.attention _UpperCAmelCase : Any = roberta_layer.self_attn_layer_norm.weight _UpperCAmelCase : Any = roberta_layer.self_attn_layer_norm.bias # self attention _UpperCAmelCase : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) _UpperCAmelCase : int = roberta_layer.self_attn.q_proj.weight _UpperCAmelCase : Dict = roberta_layer.self_attn.q_proj.bias _UpperCAmelCase : List[Any] = roberta_layer.self_attn.k_proj.weight _UpperCAmelCase : Any = roberta_layer.self_attn.k_proj.bias _UpperCAmelCase : List[str] = roberta_layer.self_attn.v_proj.weight _UpperCAmelCase : Union[str, Any] = roberta_layer.self_attn.v_proj.bias # self-attention output _UpperCAmelCase : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape _UpperCAmelCase : int = roberta_layer.self_attn.out_proj.weight _UpperCAmelCase : Union[str, Any] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm _UpperCAmelCase : Tuple = roberta_layer.final_layer_norm.weight _UpperCAmelCase : Dict = roberta_layer.final_layer_norm.bias # intermediate _UpperCAmelCase : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape _UpperCAmelCase : Union[str, Any] = roberta_layer.fca.weight _UpperCAmelCase : Tuple = roberta_layer.fca.bias # output _UpperCAmelCase : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape _UpperCAmelCase : int = roberta_layer.fca.weight _UpperCAmelCase : Optional[Any] = roberta_layer.fca.bias # end of layer if classification_head: _UpperCAmelCase : Union[str, Any] = roberta.model.classification_heads['mnli'].dense.weight _UpperCAmelCase : Dict = roberta.model.classification_heads['mnli'].dense.bias _UpperCAmelCase : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.weight _UpperCAmelCase : Tuple = roberta.model.classification_heads['mnli'].out_proj.bias else: # LM Head _UpperCAmelCase : Optional[Any] = roberta.model.encoder.lm_head.dense.weight _UpperCAmelCase : Tuple = roberta.model.encoder.lm_head.dense.bias _UpperCAmelCase : Dict = roberta.model.encoder.lm_head.layer_norm.weight _UpperCAmelCase : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.bias _UpperCAmelCase : Union[str, Any] = roberta.model.encoder.lm_head.weight _UpperCAmelCase : Optional[Any] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
_UpperCAmelCase : torch.Tensor = roberta.encode(lowerCamelCase_ ).unsqueeze(0 ) # batch of size 1 _UpperCAmelCase : Tuple = model(lowerCamelCase_ )[0] if classification_head: _UpperCAmelCase : Union[str, Any] = roberta.model.classification_heads['mnli'](roberta.extract_features(lowerCamelCase_ ) ) else: _UpperCAmelCase : int = roberta.model(lowerCamelCase_ )[0] print(our_output.shape , their_output.shape ) _UpperCAmelCase : Any = torch.max(torch.abs(our_output - their_output ) ).item() print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7 _UpperCAmelCase : Union[str, Any] = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" ) if not success: raise Exception("""Something went wRoNg""" ) pathlib.Path(lowerCamelCase_ ).mkdir(parents=lowerCamelCase_ , exist_ok=lowerCamelCase_ ) print(f"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": lowerCAmelCase_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) lowerCAmelCase_ : Dict = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
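# Generic form of the sanity check that ends the script above: after porting
# weights, feed both models the same input and compare outputs within a small
# tolerance (a sketch; real use passes a tokenized batch).
import torch


def outputs_match(model_a, model_b, input_ids, atol=1e-3):
    with torch.no_grad():
        out_a = model_a(input_ids)[0]
        out_b = model_b(input_ids)[0]
    max_diff = torch.max(torch.abs(out_a - out_b)).item()
    print(f"max_absolute_diff = {max_diff}")
    return torch.allclose(out_a, out_b, atol=atol)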
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''huggingface/informer-tourism-monthly''': ( '''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json''' ), # See all Informer models at https://huggingface.co/models?filter=informer } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : List[Any] = '''informer''' __SCREAMING_SNAKE_CASE : List[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str = "prob" , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ): '''simple docstring''' __a : Dict = prediction_length __a : Tuple = context_length or prediction_length __a : Tuple = distribution_output __a : Tuple = loss __a : str = input_size __a : Dict = num_time_features __a : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] __a : str = scaling __a : Tuple = num_dynamic_real_features __a : int = num_static_real_features __a : Dict = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) __a : Optional[Any] = cardinality else: __a : Optional[int] = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) __a : int = embedding_dimension else: __a : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] __a : int = num_parallel_samples # Transformer architecture configuration __a : str = input_size * len(self.lags_sequence ) + self._number_of_features __a : Optional[int] = d_model __a : Union[str, Any] = encoder_attention_heads __a : int = decoder_attention_heads __a : Any = encoder_ffn_dim 
__a : Union[str, Any] = decoder_ffn_dim __a : List[Any] = encoder_layers __a : Optional[int] = decoder_layers __a : int = dropout __a : Optional[Any] = attention_dropout __a : Dict = activation_dropout __a : Union[str, Any] = encoder_layerdrop __a : Optional[int] = decoder_layerdrop __a : List[str] = activation_function __a : str = init_std __a : Optional[int] = use_cache # Informer __a : Union[str, Any] = attention_type __a : str = sampling_factor __a : Dict = distil super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Any ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
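# Hedged usage sketch: if the class above is the library's InformerConfig
# (its model_type string "informer" suggests so), it can be instantiated like
# this; values are illustrative, field names follow the __init__ signature.
from transformers import InformerConfig

config = InformerConfig(
    prediction_length=24,
    context_length=48,
    input_size=1,
    num_time_features=2,
    lags_sequence=[1, 2, 3, 7],
    d_model=64,
)
print(config.context_length, config.attention_type)  # 48 prob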
'''simple docstring'''

import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
    author = "Lin, Chin-Yew and Och, Franz Josef",
    booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
    month = "aug 23{--}aug 27",
    year = "2004",
    address = "Geneva, Switzerland",
    publisher = "COLING",
    url = "https://www.aclweb.org/anthology/C04-1072",
    pages = "501--507",
}
'''

_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been
machine-translated from one natural language to another. Quality is considered to be the correspondence between a
machine's output and that of a human: "the closer a machine translation is to a professional human translation, the
better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation
with human judgements of quality, and remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good
quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the
translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the
reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score
of 1, since this would indicate that the candidate is identical to one of the reference translations. For this
reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''

_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score. Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation. Each reference should be tokenized into a list
        of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample
    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)
    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric("bleu")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results["bleu"])
    1.0
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase_(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references,
            translation_corpus=predictions,
            max_order=max_order,
            smooth=smooth,
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
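# Hedged usage sketch, not part of the original metric file: it exercises the
# `max_order` and `smooth` kwargs documented above, and assumes this module is
# installed where `datasets.load_metric("bleu")` can resolve it.
if __name__ == "__main__":
    predictions = [["the", "cat", "sat"]]
    references = [[["the", "cat", "sat", "down"]]]
    bleu = datasets.load_metric("bleu")
    # Smoothing (Lin & Och, 2004) keeps short hypotheses from scoring 0 when a
    # higher-order n-gram has no match.
    results = bleu.compute(
        predictions=predictions, references=references, max_order=2, smooth=True
    )
    print(results["bleu"], results["brevity_penalty"])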
107
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : int = (DDIMParallelScheduler,) __SCREAMING_SNAKE_CASE : Union[str, Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50)) def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' __a : List[Any] = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**SCREAMING_SNAKE_CASE__ ) return config def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' __a : Tuple = self.scheduler_classes[0] __a : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ ) __a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ ) __a , __a : List[str] = 1_0, 0.0 __a : Dict = self.dummy_model() __a : str = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) for t in scheduler.timesteps: __a : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : List[str] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample return sample def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' for timesteps in [1_0_0, 5_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = self.scheduler_classes[0] __a : List[str] = self.get_scheduler_config(steps_offset=1 ) __a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : int ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : int ): '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str ): '''simple docstring''' self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' for t in [1, 1_0, 4_9]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ) def 
__lowerCAmelCase ( self : List[str] ): '''simple docstring''' for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ): self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' __a : List[str] = self.scheduler_classes[0] __a : Union[str, Any] = self.get_scheduler_config() __a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5 def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' __a : List[str] = self.scheduler_classes[0] __a : List[str] = self.get_scheduler_config() __a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) __a , __a : Any = 1_0, 0.0 scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) __a : List[Any] = self.dummy_model() __a : int = self.dummy_sample_deter __a : List[Any] = self.dummy_sample_deter + 0.1 __a : List[str] = self.dummy_sample_deter - 0.1 __a : Optional[Any] = samplea.shape[0] __a : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 ) __a : Union[str, Any] = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ ) __a : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) __a : int = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE__ ) __a : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2 assert abs(result_mean.item() - 0.4_982 ) < 1e-3 def __lowerCAmelCase ( self : str ): '''simple docstring''' __a : List[str] = self.full_loop() __a : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 172.0_067 ) < 1e-2 assert abs(result_mean.item() - 0.223_967 ) < 1e-3 def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' __a : Optional[int] = self.full_loop(prediction_type='v_prediction' ) __a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 52.5_302 ) < 1e-2 assert abs(result_mean.item() - 0.0_684 ) < 1e-3 def __lowerCAmelCase ( self : int ): '''simple docstring''' __a : Union[str, Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 ) __a : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 149.8_295 ) < 1e-2 assert abs(result_mean.item() - 0.1_951 ) < 1e-3 def __lowerCAmelCase ( self : str ): '''simple docstring''' __a : Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 ) __a : str = 
torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 149.0_784 ) < 1e-2 assert abs(result_mean.item() - 0.1_941 ) < 1e-3
47
0
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase : List[str] = logging.get_logger(__name__) lowerCAmelCase : List[str] = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class _UpperCamelCase ( __lowerCamelCase): '''simple docstring''' _snake_case = '''xlnet''' _snake_case = ['''mems'''] _snake_case = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , a_=3_2_0_0_0 , a_=1_0_2_4 , a_=2_4 , a_=1_6 , a_=4_0_9_6 , a_="gelu" , a_=True , a_="bi" , a_=0.02 , a_=1e-1_2 , a_=0.1 , a_=5_1_2 , a_=None , a_=True , a_=False , a_=False , a_=-1 , a_=False , a_="last" , a_=True , a_="tanh" , a_=0.1 , a_=5 , a_=5 , a_=5 , a_=1 , a_=2 , **a_ , ) -> Tuple: lowercase : Tuple = vocab_size lowercase : Dict = d_model lowercase : str = n_layer lowercase : str = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) lowercase : str = d_model // n_head lowercase : str = ff_activation lowercase : Any = d_inner lowercase : Dict = untie_r lowercase : List[str] = attn_type lowercase : str = initializer_range lowercase : Tuple = layer_norm_eps lowercase : List[str] = dropout lowercase : str = mem_len lowercase : List[Any] = reuse_len lowercase : List[Any] = bi_data lowercase : str = clamp_len lowercase : List[str] = same_length lowercase : Optional[int] = summary_type lowercase : Any = summary_use_proj lowercase : List[str] = summary_activation lowercase : List[Any] = summary_last_dropout lowercase : Union[str, Any] = start_n_top lowercase : str = end_n_top lowercase : List[str] = bos_token_id lowercase : Union[str, Any] = pad_token_id lowercase : Tuple = eos_token_id if "use_cache" in kwargs: warnings.warn( "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`" " instead." , SCREAMING_SNAKE_CASE__ , ) lowercase : Dict = kwargs['use_cache'] lowercase : List[Any] = use_mems_eval lowercase : List[Any] = use_mems_train super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def a__ ( self ) -> str: logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def a__ ( self , a_ ) -> int: raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
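# Hedged worked example, not in the original file: the consistency check in
# the XLNet config above requires d_model % n_head == 0, and any explicit
# d_head passed via kwargs must equal d_model // n_head. With the defaults
# d_model=1024 and n_head=16:
assert 1024 % 16 == 0
assert 1024 // 16 == 64  # the derived per-head dimension d_head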
372
def UpperCAmelCase__(equation_a: list[int], equation_b: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation_a) == len(equation_b) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation_a[0] == equation_a[1] == equation_b[0] == equation_b[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation_a
    a2, b2, c2 = equation_b

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
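# Hedged usage sketch, not in the original file: each argument is one
# equation [a, b, c] meaning a*x + b*y = c.
if __name__ == "__main__":
    print(UpperCAmelCase__([2, 3, 0], [5, 1, 0]))    # (0.0, 0.0), trivial solution
    print(UpperCAmelCase__([11, 2, 30], [1, 0, 2]))  # (2.0, 4.0)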
47
0
"""simple docstring""" import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class _SCREAMING_SNAKE_CASE ( __lowerCamelCase , __lowerCamelCase ): """simple docstring""" @register_to_config def __init__( self , lowerCamelCase__ = 128 , lowerCamelCase__ = 256 , lowerCamelCase__ = 2000.0 , lowerCamelCase__ = 768 , lowerCamelCase__ = 12 , lowerCamelCase__ = 12 , lowerCamelCase__ = 64 , lowerCamelCase__ = 2048 , lowerCamelCase__ = 0.1 , ) -> int: super().__init__() lowercase__ : List[str] = nn.Sequential( nn.Linear(SCREAMING_SNAKE_CASE__ , d_model * 4 , bias=SCREAMING_SNAKE_CASE__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=SCREAMING_SNAKE_CASE__ ) , nn.SiLU() , ) lowercase__ : Optional[Any] = nn.Embedding(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase__ : Optional[Any] = False lowercase__ : Tuple = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) lowercase__ : Tuple = nn.Dropout(p=SCREAMING_SNAKE_CASE__ ) lowercase__ : Union[str, Any] = nn.ModuleList() for lyr_num in range(SCREAMING_SNAKE_CASE__ ): # FiLM conditional T5 decoder lowercase__ : List[Any] = DecoderLayer(d_model=SCREAMING_SNAKE_CASE__ , d_kv=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , d_ff=SCREAMING_SNAKE_CASE__ , dropout_rate=SCREAMING_SNAKE_CASE__ ) self.decoders.append(SCREAMING_SNAKE_CASE__ ) lowercase__ : Union[str, Any] = TaLayerNorm(SCREAMING_SNAKE_CASE__ ) lowercase__ : Any = nn.Dropout(p=SCREAMING_SNAKE_CASE__ ) lowercase__ : Union[str, Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: lowercase__ : Any = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str: lowercase__ : Optional[int] = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. lowercase__ : Optional[int] = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) lowercase__ : List[Any] = self.conditioning_emb(SCREAMING_SNAKE_CASE__ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) lowercase__ : Optional[Any] = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. lowercase__ : int = torch.broadcast_to( torch.arange(SCREAMING_SNAKE_CASE__ , device=decoder_input_tokens.device ) , (batch, seq_length) , ) lowercase__ : Any = self.position_encoding(SCREAMING_SNAKE_CASE__ ) lowercase__ : Any = self.continuous_inputs_projection(SCREAMING_SNAKE_CASE__ ) inputs += position_encodings lowercase__ : Union[str, Any] = self.dropout(SCREAMING_SNAKE_CASE__ ) # decoder: No padding present. lowercase__ : Optional[int] = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
lowercase__ : List[Any] = [(x, self.encoder_decoder_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )) for x, y in encodings_and_masks] # cross attend style: concat encodings lowercase__ : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) lowercase__ : List[str] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: lowercase__ : Optional[Any] = lyr( SCREAMING_SNAKE_CASE__ , conditioning_emb=SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , encoder_attention_mask=SCREAMING_SNAKE_CASE__ , )[0] lowercase__ : Optional[int] = self.decoder_norm(SCREAMING_SNAKE_CASE__ ) lowercase__ : Optional[int] = self.post_dropout(SCREAMING_SNAKE_CASE__ ) lowercase__ : str = self.spec_out(SCREAMING_SNAKE_CASE__ ) return spec_out class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=1E-6 ) -> List[str]: super().__init__() lowercase__ : str = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=SCREAMING_SNAKE_CASE__ , d_kv=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , dropout_rate=SCREAMING_SNAKE_CASE__ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=SCREAMING_SNAKE_CASE__ , d_kv=SCREAMING_SNAKE_CASE__ , num_heads=SCREAMING_SNAKE_CASE__ , dropout_rate=SCREAMING_SNAKE_CASE__ , layer_norm_epsilon=SCREAMING_SNAKE_CASE__ , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=SCREAMING_SNAKE_CASE__ , d_ff=SCREAMING_SNAKE_CASE__ , dropout_rate=SCREAMING_SNAKE_CASE__ , layer_norm_epsilon=SCREAMING_SNAKE_CASE__ ) ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , ) -> Tuple: lowercase__ : List[str] = self.layer[0]( SCREAMING_SNAKE_CASE__ , conditioning_emb=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , ) if encoder_hidden_states is not None: lowercase__ : List[str] = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) lowercase__ : str = self.layer[1]( SCREAMING_SNAKE_CASE__ , key_value_states=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , ) # Apply Film Conditional Feed Forward layer lowercase__ : List[str] = self.layer[-1](SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return (hidden_states,) class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict: super().__init__() lowercase__ : int = TaLayerNorm(SCREAMING_SNAKE_CASE__ ) lowercase__ : str = TaFiLMLayer(in_features=d_model * 4 , out_features=SCREAMING_SNAKE_CASE__ ) lowercase__ : int = Attention(query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , out_bias=SCREAMING_SNAKE_CASE__ , scale_qk=SCREAMING_SNAKE_CASE__ ) lowercase__ : int = nn.Dropout(SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ) -> int: lowercase__ : List[Any] = self.layer_norm(SCREAMING_SNAKE_CASE__ ) if conditioning_emb is not None: lowercase__ : int = self.FiLMLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Self-attention block lowercase__ : List[str] = self.attention(SCREAMING_SNAKE_CASE__ ) lowercase__ : 
Union[str, Any] = hidden_states + self.dropout(SCREAMING_SNAKE_CASE__ ) return hidden_states class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]: super().__init__() lowercase__ : int = Attention(query_dim=SCREAMING_SNAKE_CASE__ , heads=SCREAMING_SNAKE_CASE__ , dim_head=SCREAMING_SNAKE_CASE__ , out_bias=SCREAMING_SNAKE_CASE__ , scale_qk=SCREAMING_SNAKE_CASE__ ) lowercase__ : Optional[int] = TaLayerNorm(SCREAMING_SNAKE_CASE__ , eps=SCREAMING_SNAKE_CASE__ ) lowercase__ : Dict = nn.Dropout(SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ) -> Optional[Any]: lowercase__ : Optional[Any] = self.layer_norm(SCREAMING_SNAKE_CASE__ ) lowercase__ : Optional[Any] = self.attention( SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , attention_mask=attention_mask.squeeze(1 ) , ) lowercase__ : int = hidden_states + self.dropout(SCREAMING_SNAKE_CASE__ ) return layer_output class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: super().__init__() lowercase__ : str = TaDenseGatedActDense(d_model=SCREAMING_SNAKE_CASE__ , d_ff=SCREAMING_SNAKE_CASE__ , dropout_rate=SCREAMING_SNAKE_CASE__ ) lowercase__ : Tuple = TaFiLMLayer(in_features=d_model * 4 , out_features=SCREAMING_SNAKE_CASE__ ) lowercase__ : Union[str, Any] = TaLayerNorm(SCREAMING_SNAKE_CASE__ , eps=SCREAMING_SNAKE_CASE__ ) lowercase__ : Dict = nn.Dropout(SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__=None ) -> List[Any]: lowercase__ : int = self.layer_norm(SCREAMING_SNAKE_CASE__ ) if conditioning_emb is not None: lowercase__ : Optional[int] = self.film(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase__ : str = self.DenseReluDense(SCREAMING_SNAKE_CASE__ ) lowercase__ : Dict = hidden_states + self.dropout(SCREAMING_SNAKE_CASE__ ) return hidden_states class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]: super().__init__() lowercase__ : Any = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) lowercase__ : List[str] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) lowercase__ : List[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ ) lowercase__ : Any = nn.Dropout(SCREAMING_SNAKE_CASE__ ) lowercase__ : List[Any] = NewGELUActivation() def UpperCAmelCase__( self , lowerCamelCase__ ) -> Dict: lowercase__ : List[Any] = self.act(self.wi_a(SCREAMING_SNAKE_CASE__ ) ) lowercase__ : List[str] = self.wi_a(SCREAMING_SNAKE_CASE__ ) lowercase__ : int = hidden_gelu * hidden_linear lowercase__ : str = self.dropout(SCREAMING_SNAKE_CASE__ ) lowercase__ : List[str] = self.wo(SCREAMING_SNAKE_CASE__ ) return hidden_states class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__=1E-6 ) -> Dict: super().__init__() lowercase__ : int = nn.Parameter(torch.ones(SCREAMING_SNAKE_CASE__ ) ) lowercase__ : Any = eps def UpperCAmelCase__( self , lowerCamelCase__ ) -> List[str]: lowercase__ : str = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=SCREAMING_SNAKE_CASE__ ) lowercase__ 
: Optional[Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: lowercase__ : Union[str, Any] = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def UpperCAmelCase__( self , lowerCamelCase__ ) -> str: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_4715 * torch.pow(SCREAMING_SNAKE_CASE__ , 3.0 )) )) class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" def __init__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Dict: super().__init__() lowercase__ : Any = nn.Linear(SCREAMING_SNAKE_CASE__ , out_features * 2 , bias=SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]: lowercase__ : Dict = self.scale_bias(SCREAMING_SNAKE_CASE__ ) lowercase__ : List[str] = torch.chunk(SCREAMING_SNAKE_CASE__ , 2 , -1 ) lowercase__ : List[Any] = x * (1 + scale) + shift return x
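# Minimal sketch, not part of the original file: the FiLM modulation the
# decoder above relies on, x * (1 + scale) + shift, where (scale, shift) come
# from projecting a conditioning embedding. It assumes the de-obfuscated
# upstream signature TaFiLMLayer(in_features, out_features) and a standard
# nn.Module.forward(x, conditioning_emb), which the renamed methods above
# obscure.
if __name__ == "__main__":
    import torch

    d_model, batch, seq = 8, 2, 5
    film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
    x = torch.randn(batch, seq, d_model)
    conditioning_emb = torch.randn(batch, 1, d_model * 4)
    out = film(x, conditioning_emb)
    assert out.shape == (batch, seq, d_model)  # (scale, shift) broadcast over the sequence dim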
200
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import SpectrogramDiffusionPipeline, TaFilmDecoder

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
47
0
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED __magic_name__ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } __magic_name__ = { '''allenai/led-base-16384''': 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def __magic_name__ ( ): '''simple docstring''' lowerCamelCase_ : List[str] = ( list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1)) ) lowerCamelCase_ : str = bs[:] lowerCamelCase_ : List[Any] = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase_) cs.append(2**8 + n) n += 1 lowerCamelCase_ : Dict = [chr(lowerCamelCase_) for n in cs] return dict(zip(lowerCamelCase_ , lowerCamelCase_)) def __magic_name__ ( lowerCAmelCase_): '''simple docstring''' lowerCamelCase_ : Optional[Any] = set() lowerCamelCase_ : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char)) lowerCamelCase_ : Dict = char return pairs class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" __UpperCAmelCase : Tuple = VOCAB_FILES_NAMES __UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : int = ['''input_ids''', '''attention_mask'''] def __init__( self , a_ , a_ , a_="replace" , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=False , **a_ , ): lowerCamelCase_ : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token lowerCamelCase_ : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token lowerCamelCase_ : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token lowerCamelCase_ : Optional[int] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token lowerCamelCase_ : Optional[int] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token lowerCamelCase_ : Dict = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase_ : Tuple = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token super().__init__( errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as vocab_handle: lowerCamelCase_ : List[Any] = json.load(SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ : List[str] = {v: k for k, v in self.encoder.items()} lowerCamelCase_ : Dict = errors # how to handle errors in decoding lowerCamelCase_ : List[Any] = bytes_to_unicode() lowerCamelCase_ : List[Any] = {v: k for k, v in self.byte_encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding="utf-8" ) as merges_handle: lowerCamelCase_ : str = merges_handle.read().split("\n" )[1:-1] lowerCamelCase_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase_ : str = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) lowerCamelCase_ : Tuple = {} lowerCamelCase_ : Union[str, Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase_ : Optional[Any] = re.compile(R"\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _UpperCamelCase ( self ): return len(self.encoder ) def _UpperCamelCase ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def _UpperCamelCase ( self , a_ ): if token in self.cache: return self.cache[token] lowerCamelCase_ : List[Any] = tuple(SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ : Tuple = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: return token while True: lowerCamelCase_ : Tuple = min(SCREAMING_SNAKE_CASE__ , key=lambda a_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase_ : str = bigram lowerCamelCase_ : Optional[Any] = [] lowerCamelCase_ : List[str] = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: lowerCamelCase_ : str = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase_ : List[Any] = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase_ : Optional[int] = tuple(SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ : Union[str, Any] = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: lowerCamelCase_ : Dict = get_pairs(SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ : Tuple = ' '.join(SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ : List[Any] = word return word def _UpperCamelCase ( self , a_ ): lowerCamelCase_ : int = [] for token in re.findall(self.pat , SCREAMING_SNAKE_CASE__ ): lowerCamelCase_ : Optional[Any] = ''.join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE__ ).split(" " ) ) return bpe_tokens def 
_UpperCamelCase ( self , a_ ): return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def _UpperCamelCase ( self , a_ ): return self.decoder.get(SCREAMING_SNAKE_CASE__ ) def _UpperCamelCase ( self , a_ ): lowerCamelCase_ : Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _UpperCamelCase ( self , a_ , a_ = None ): if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_ : int = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) lowerCamelCase_ : Optional[int] = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + "\n" ) lowerCamelCase_ : List[Any] = 0 with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a_ : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" " Please check that the tokenizer is not corrupted!" ) lowerCamelCase_ : Optional[int] = token_index writer.write(" ".join(SCREAMING_SNAKE_CASE__ ) + "\n" ) index += 1 return vocab_file, merge_file def _UpperCamelCase ( self , a_ , a_ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase_ : Union[str, Any] = [self.cls_token_id] lowerCamelCase_ : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _UpperCamelCase ( self , a_ , a_ = None , a_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] def _UpperCamelCase ( self , a_ , a_ = None ): lowerCamelCase_ : Any = [self.sep_token_id] lowerCamelCase_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCamelCase ( self , a_ , a_=False , **a_ ): lowerCamelCase_ : Dict = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()): lowerCamelCase_ : Tuple = ' ' + text return (text, kwargs) def _UpperCamelCase ( self , a_ , a_ = None , a_ = PaddingStrategy.DO_NOT_PAD , a_ = None , a_ = None , ): lowerCamelCase_ : Union[str, Any] = super()._pad( encoded_inputs=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding_strategy=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase_ : Tuple = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: 
lowerCamelCase_ : Optional[Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCamelCase_ : int = len(encoded_inputs["global_attention_mask"] ) != len(SCREAMING_SNAKE_CASE__ ) if needs_to_be_padded: lowerCamelCase_ : Any = len(SCREAMING_SNAKE_CASE__ ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase_ : Optional[Any] = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase_ : List[str] = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
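# Hedged usage sketch, not part of the original tokenizer file. It assumes the
# class above is the LED tokenizer shipped as allenai/led-base-16384, and
# shows the `_pad` override in action: `global_attention_mask` is padded with
# -1 (meaning "local attention"), not with a masked-out value.
if __name__ == "__main__":
    from transformers import LEDTokenizer

    tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    batch = tok(["short", "a much longer input sentence"])
    # Put global attention on the first token of each sequence.
    batch["global_attention_mask"] = [
        [1] + [0] * (len(ids) - 1) for ids in batch["input_ids"]
    ]
    padded = tok.pad(batch, padding=True)
    # The shorter sequence's global_attention_mask now ends in -1 entries.
    print(padded["global_attention_mask"])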
250
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__ = { '''configuration_bridgetower''': [ '''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BridgeTowerConfig''', '''BridgeTowerTextConfig''', '''BridgeTowerVisionConfig''', ], '''processing_bridgetower''': ['''BridgeTowerProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''BridgeTowerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BridgeTowerForContrastiveLearning''', '''BridgeTowerForImageAndTextRetrieval''', '''BridgeTowerForMaskedLM''', '''BridgeTowerModel''', '''BridgeTowerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
47
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json', 'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json', 'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json', 'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json', 'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json', 'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json', } class UpperCAmelCase_ ( __lowerCamelCase ): UpperCamelCase ='''roberta''' def __init__( self , UpperCamelCase_=5_02_65 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ) -> Optional[Any]: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowercase : Optional[Any] = vocab_size __lowercase : Tuple = hidden_size __lowercase : List[str] = num_hidden_layers __lowercase : List[Any] = num_attention_heads __lowercase : str = hidden_act __lowercase : Optional[Any] = intermediate_size __lowercase : Dict = hidden_dropout_prob __lowercase : List[str] = attention_probs_dropout_prob __lowercase : Optional[Any] = max_position_embeddings __lowercase : Dict = type_vocab_size __lowercase : str = initializer_range __lowercase : List[str] = layer_norm_eps __lowercase : Optional[int] = position_embedding_type __lowercase : Union[str, Any] = use_cache __lowercase : str = classifier_dropout class UpperCAmelCase_ ( __lowerCamelCase ): @property def _lowerCamelCase ( self ) -> Optional[Any]: if self.task == "multiple-choice": __lowercase : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __lowercase : Dict = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
76
from string import ascii_lowercase, ascii_uppercase


def UpperCAmelCase__(sentence: str) -> str:
    if not sentence:
        return ""
    # Map every lowercase letter to its uppercase counterpart, then
    # capitalize the first character (if it is a lowercase letter).
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
0
"""simple docstring""" from typing import TYPE_CHECKING from ..utils import _LazyModule __lowerCAmelCase : Dict = { '''config''': [ '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''', '''OnnxConfig''', '''OnnxConfigWithPast''', '''OnnxSeq2SeqConfigWithPast''', '''PatchingSpec''', ], '''convert''': ['''export''', '''validate_model_outputs'''], '''features''': ['''FeaturesManager'''], '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys __lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
58
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = '''sew-d''' def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=2_5_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=("p2c", "c2p") , SCREAMING_SNAKE_CASE__ : str="layer_norm" , SCREAMING_SNAKE_CASE__ : Tuple="gelu_python" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-7 , SCREAMING_SNAKE_CASE__ : Any=1e-5 , SCREAMING_SNAKE_CASE__ : Optional[int]="group" , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : str=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=1_6 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="mean" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=2_5_6 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , **SCREAMING_SNAKE_CASE__ : Any , ): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ ) __a : Optional[int] = hidden_size __a : Optional[Any] = feat_extract_norm __a : List[str] = feat_extract_activation __a : Dict = list(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ ) __a : List[str] = list(SCREAMING_SNAKE_CASE__ ) __a : int = conv_bias __a : Tuple = num_conv_pos_embeddings __a : List[str] = num_conv_pos_embedding_groups __a : Optional[Any] = len(self.conv_dim ) __a : Union[str, Any] = num_hidden_layers __a : Optional[Any] = intermediate_size __a : Union[str, Any] = squeeze_factor __a : List[Any] = max_position_embeddings __a : Tuple = position_buckets __a : Optional[int] = share_att_key __a : List[str] = relative_attention __a : Any = norm_rel_ebd __a : Any = list(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = hidden_act __a : str = num_attention_heads __a : Union[str, Any] = hidden_dropout __a : Optional[int] = attention_dropout 
__a : List[str] = activation_dropout __a : int = feat_proj_dropout __a : int = final_dropout __a : Dict = layer_norm_eps __a : Tuple = feature_layer_norm_eps __a : str = initializer_range __a : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __a : Tuple = apply_spec_augment __a : Optional[Any] = mask_time_prob __a : Any = mask_time_length __a : List[str] = mask_time_min_masks __a : List[str] = mask_feature_prob __a : Tuple = mask_feature_length __a : Any = mask_feature_min_masks # ctc loss __a : Optional[int] = ctc_loss_reduction __a : List[Any] = ctc_zero_infinity # sequence classification __a : Dict = use_weighted_layer_sum __a : Optional[Any] = classifier_proj_size @property def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
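# Small sketch, not in the original file, of the reduce-based property that
# closes the SEW-D config above: it multiplies the conv strides, i.e. the
# waveform-to-frame downsampling factor. With the default strides:
import functools
import operator

default_conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, default_conv_stride, 1) == 160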
47
0
'''simple docstring'''


def _UpperCamelCase(numerator: int = 1, digit: int = 1000) -> int:
    """simple docstring"""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
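# Hedged check, not in the original file: among denominators below 10, 1/7 has
# the longest recurring cycle (0.(142857), length 6), so the function above
# should return 7 when called with digit=10.
if __name__ == "__main__":
    assert _UpperCamelCase(1, 10) == 7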
436
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ = TypeVar('''T''') def UpperCAmelCase__ ( lowerCamelCase_ : int ): return (position - 1) // 2 def UpperCAmelCase__ ( lowerCamelCase_ : int ): return (2 * position) + 1 def UpperCAmelCase__ ( lowerCamelCase_ : int ): return (2 * position) + 2 class _UpperCamelCase( Generic[T] ): def __init__( self : List[str] ): '''simple docstring''' __a : list[tuple[T, int]] = [] __a : dict[T, int] = {} __a : int = 0 def __len__( self : Any ): '''simple docstring''' return self.elements def __repr__( self : Any ): '''simple docstring''' return str(self.heap ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return self.elements == 0 def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' self.heap.append((elem, weight) ) __a : List[Any] = self.elements self.elements += 1 self._bubble_up(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __a , __a : Union[str, Any] = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __a , __a : Dict = self.heap[0] self._bubble_down(SCREAMING_SNAKE_CASE__ ) return elem def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : List[Any] = self.position_map[elem] __a : str = (elem, weight) if position > 0: __a : Tuple = get_parent_position(SCREAMING_SNAKE_CASE__ ) __a , __a : Dict = self.heap[parent_position] if parent_weight > weight: self._bubble_up(SCREAMING_SNAKE_CASE__ ) else: self._bubble_down(SCREAMING_SNAKE_CASE__ ) else: self._bubble_down(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T ): '''simple docstring''' __a : List[Any] = self.position_map[elem] if curr_pos == 0: return None __a : List[str] = get_parent_position(SCREAMING_SNAKE_CASE__ ) __a , __a : str = self.heap[curr_pos] __a , __a : Optional[int] = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_up(SCREAMING_SNAKE_CASE__ ) return None def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T ): '''simple docstring''' __a : int = self.position_map[elem] __a , __a : Optional[Any] = self.heap[curr_pos] __a : Tuple = get_child_left_position(SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = get_child_right_position(SCREAMING_SNAKE_CASE__ ) if child_left_position < self.elements and child_right_position < self.elements: __a , __a : str = self.heap[child_left_position] __a , __a : List[str] = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) if child_left_position < self.elements: __a , __a : Any = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) else: return None if child_right_position < self.elements: __a , __a : Union[str, Any] = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) return None def __lowerCAmelCase ( self : 
List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : Optional[Any] = self.heap[nodea_pos][0] __a : str = self.heap[nodea_pos][0] __a , __a : int = ( self.heap[nodea_pos], self.heap[nodea_pos], ) __a : str = nodea_pos __a : Optional[int] = nodea_pos class _UpperCamelCase( Generic[T] ): def __init__( self : List[Any] ): '''simple docstring''' __a : dict[T, dict[T, int]] = {} __a : int = 0 def __repr__( self : Tuple ): '''simple docstring''' return str(self.connections ) def __len__( self : Dict ): '''simple docstring''' return self.nodes def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ): '''simple docstring''' if node not in self.connections: __a : Tuple = {} self.nodes += 1 def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' self.add_node(SCREAMING_SNAKE_CASE__ ) self.add_node(SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = weight __a : Any = weight def UpperCAmelCase__ ( lowerCamelCase_ : GraphUndirectedWeighted[T] , ): __a : dict[T, int] = {node: maxsize for node in graph.connections} __a : dict[T, T | None] = {node: None for node in graph.connections} __a : MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(lowerCamelCase_ , lowerCamelCase_ ) if priority_queue.is_empty(): return dist, parent # initialization __a : Optional[int] = priority_queue.extract_min() __a : int = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __a : str = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowerCamelCase_ , dist[neighbour] ) __a : Optional[int] = node # running prim's algorithm while not priority_queue.is_empty(): __a : Any = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __a : Tuple = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowerCamelCase_ , dist[neighbour] ) __a : Dict = node return dist, parent
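# Hedged usage sketch, not part of the original file. The type hints above
# refer to `MinPriorityQueue` and `GraphUndirectedWeighted`, although the
# obfuscation shows both classes under the same name; this sketch assumes
# those distinct names, with de-obfuscated `add_edge` and a Prim's-algorithm
# entry point called `prims_algorithm`.
if __name__ == "__main__":
    graph = GraphUndirectedWeighted[str]()
    graph.add_edge("a", "b", 3)
    graph.add_edge("b", "c", 10)
    graph.add_edge("c", "a", 5)
    dist, parent = prims_algorithm(graph)
    # MST edges follow the parent pointers: b->a (weight 3) and c->a (weight 5).
    print(dist, parent)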
47
0
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller _lowercase : Union[str, Any] = 3 def lowerCamelCase__ ( A : int ): '''simple docstring''' print('''Generating primitive root of p''' ) while True: UpperCAmelCase = random.randrange(3 , lowerCamelCase_ ) if pow(lowerCamelCase_ , 2 , lowerCamelCase_ ) == 1: continue if pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) == 1: continue return g def lowerCamelCase__ ( A : int ): '''simple docstring''' print('''Generating prime p...''' ) UpperCAmelCase = rabin_miller.generate_large_prime(lowerCamelCase_ ) # select large prime number. UpperCAmelCase = primitive_root(lowerCamelCase_ ) # one primitive root on modulo p. UpperCAmelCase = random.randrange(3 , lowerCamelCase_ ) # private_key -> have to be greater than 2 for safety. UpperCAmelCase = cryptomath.find_mod_inverse(pow(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) , lowerCamelCase_ ) UpperCAmelCase = (key_size, e_a, e_a, p) UpperCAmelCase = (key_size, d) return public_key, private_key def lowerCamelCase__ ( A : str , A : int ): '''simple docstring''' if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ): print('''\nWARNING:''' ) print( f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n""" '''Use a different name or delete these files and re-run this program.''' ) sys.exit() UpperCAmelCase = generate_key(lowerCamelCase_ ) print(f"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(f"""{name}_pubkey.txt""" , '''w''' ) as fo: fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" ) print(f"""Writing private key to file {name}_privkey.txt...""" ) with open(f"""{name}_privkey.txt""" , '''w''' ) as fo: fo.write(f"""{private_key[0]},{private_key[1]}""" ) def lowerCamelCase__ ( ): '''simple docstring''' print('''Making key files...''' ) make_key_files('''elgamal''' , 20_48 ) print('''Key files generation successful''' ) if __name__ == "__main__": main()
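# Hedged math sketch, not in the original file: ElGamal in one picture, using
# hypothetical small numbers (the key generator above works with 2048-bit
# primes). Public key: (p, g, y) with y = g^d mod p; ciphertext of m under an
# ephemeral k: (c1, c2) = (g^k mod p, m * y^k mod p); decryption recovers
# m = c2 * c1^(p-1-d) mod p by Fermat's little theorem.
p, g, d = 467, 2, 127  # assumed toy parameters (p prime)
y = pow(g, d, p)
k, m = 213, 100        # assumed ephemeral key and message
c1, c2 = pow(g, k, p), (m * pow(y, k, p)) % p
assert (c2 * pow(c1, p - 1 - d, p)) % p == m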
210
from collections.abc import Sequence
from queue import Queue


class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


if __name__ == "__main__":
    import operator

    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7 (for operator.add)
        print(arr.query_range(2, 2))  # 5 (for operator.add)
        print(arr.query_range(1, 3))  # 13 (for operator.add, after update(1, 5))
        print()
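# Added cross-check sketch (not part of the module above): a compact iterative,
# array-based segment tree for range sums. `IterativeSegmentTree` is an
# illustrative name chosen here; it reproduces the operator.add results of the
# node-based tree above without recursion.
class IterativeSegmentTree:
    def __init__(self, data):
        self.n = len(data)
        self.tree = [0] * (2 * self.n)
        self.tree[self.n :] = data  # leaves sit in the second half of the array
        for i in range(self.n - 1, 0, -1):
            self.tree[i] = self.tree[2 * i] + self.tree[2 * i + 1]

    def update(self, i, value):
        i += self.n
        self.tree[i] = value
        while i > 1:
            i //= 2
            self.tree[i] = self.tree[2 * i] + self.tree[2 * i + 1]

    def query_range(self, left, right):  # inclusive [left, right], like above
        res = 0
        left += self.n
        right += self.n + 1
        while left < right:
            if left % 2:
                res += self.tree[left]
                left += 1
            if right % 2:
                right -= 1
                res += self.tree[right]
            left //= 2
            right //= 2
        return res


if __name__ == "__main__":
    t = IterativeSegmentTree([2, 1, 5, 3, 4])
    t.update(1, 5)
    assert t.query_range(1, 3) == 13  # matches the recursive tree for operator.add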
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def _SCREAMING_SNAKE_CASE ( lowercase : Optional[int] ): '''simple docstring''' lowerCamelCase_ = filter(lambda lowercase : p.requires_grad , model.parameters() ) lowerCamelCase_ = sum([np.prod(p.size() ) for p in model_parameters] ) return params lowerCamelCase : int = logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE ( lowercase : Tuple , lowercase : Optional[Any] ): '''simple docstring''' if metric == "rouge2": lowerCamelCase_ = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": lowerCamelCase_ = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": lowerCamelCase_ = '{val_avg_em:.4f}-{step_count}' elif metric == "loss": lowerCamelCase_ = '{val_avg_loss:.4f}-{step_count}' else: raise NotImplementedError( f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this""" ' function.' ) lowerCamelCase_ = ModelCheckpoint( dirpath=lowerCamelCase_ , filename=lowerCamelCase_ , monitor=f"""val_{metric}""" , mode='max' , save_top_k=1 , every_n_epochs=1 , ) return checkpoint_callback def _SCREAMING_SNAKE_CASE ( lowercase : int , lowercase : Any ): '''simple docstring''' return EarlyStopping( monitor=f"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=lowerCamelCase_ , verbose=lowerCamelCase_ , ) class A( pl.Callback ): '''simple docstring''' def a__ ( self : List[Any] , A_ : Optional[int] , A_ : Tuple ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = {f"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE__ ) @rank_zero_only def a__ ( self : str , A_ : pl.Trainer , A_ : pl.LightningModule , A_ : str , A_ : Any=True ) -> Optional[int]: """simple docstring""" logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" ) lowerCamelCase_ = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results lowerCamelCase_ = Path(pl_module.hparams.output_dir ) if type_path == "test": lowerCamelCase_ = od / 'test_results.txt' lowerCamelCase_ = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
lowerCamelCase_ = od / f"""{type_path}_results/{trainer.global_step:05d}.txt""" lowerCamelCase_ = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt""" results_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) generations_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , 'a+' ) as writer: for key in sorted(SCREAMING_SNAKE_CASE__ ): if key in ["log", "progress_bar", "preds"]: continue lowerCamelCase_ = metrics[key] if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ): lowerCamelCase_ = val.item() lowerCamelCase_ = f"""{key}: {val:.6f}\n""" writer.write(SCREAMING_SNAKE_CASE__ ) if not save_generations: return if "preds" in metrics: lowerCamelCase_ = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(SCREAMING_SNAKE_CASE__ ) @rank_zero_only def a__ ( self : Optional[int] , A_ : List[Any] , A_ : str ) -> List[str]: """simple docstring""" try: lowerCamelCase_ = pl_module.model.model.num_parameters() except AttributeError: lowerCamelCase_ = pl_module.model.num_parameters() lowerCamelCase_ = count_trainable_parameters(SCREAMING_SNAKE_CASE__ ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def a__ ( self : int , A_ : pl.Trainer , A_ : pl.LightningModule ) -> Dict: """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'test' ) @rank_zero_only def a__ ( self : Any , A_ : pl.Trainer , A_ : Dict ) -> Tuple: """simple docstring""" save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
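# Hedged usage sketch (not from this file): wiring an equivalent
# checkpoint/early-stopping pair into a Trainer via the public
# pytorch_lightning API. `MySeq2SeqModule` and `dm` are hypothetical
# stand-ins for a LightningModule and datamodule.
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

metric = 'rouge2'
checkpoint_cb = ModelCheckpoint(
    dirpath='checkpoints',
    filename='{val_avg_rouge2:.4f}-{step_count}',
    monitor=f'val_{metric}',
    mode='max',
    save_top_k=1,
)
early_stop_cb = EarlyStopping(monitor=f'val_{metric}', mode='max', patience=3)
trainer = pl.Trainer(callbacks=[checkpoint_cb, early_stop_cb], max_epochs=5)
# trainer.fit(MySeq2SeqModule(), datamodule=dm)  # hypothetical module/datamodule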
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int SCREAMING_SNAKE_CASE__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _UpperCamelCase( datasets.BuilderConfig ): __SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None def UpperCAmelCase__ ( lowerCamelCase_ : "pyspark.sql.DataFrame" , lowerCamelCase_ : List[int] , ): import pyspark def generate_fn(): __a : List[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: __a : Optional[int] = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' ) __a : Optional[Any] = partition_df.collect() __a : Union[str, Any] = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class _UpperCamelCase( _BaseExamplesIterable ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : Dict=None , ): '''simple docstring''' __a : List[str] = df __a : Tuple = partition_order or range(self.df.rdd.getNumPartitions() ) __a : List[Any] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Tuple ): '''simple docstring''' yield from self.generate_examples_fn() def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.random.Generator ): '''simple docstring''' __a : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(SCREAMING_SNAKE_CASE__ ) return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : Union[str, Any] = self.split_shard_indices_by_worker(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Dict ): '''simple docstring''' return len(self.partition_order ) class _UpperCamelCase( datasets.DatasetBuilder ): __SCREAMING_SNAKE_CASE : List[str] = SparkConfig def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : str = None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ): '''simple docstring''' import pyspark __a : int = pyspark.sql.SparkSession.builder.getOrCreate() __a : Optional[int] = df __a : List[Any] = working_dir super().__init__( cache_dir=SCREAMING_SNAKE_CASE__ , config_name=str(self.df.semanticHash() ) , **SCREAMING_SNAKE_CASE__ , ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' def create_cache_and_write_probe(SCREAMING_SNAKE_CASE__ : List[str] ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. 
os.makedirs(self._cache_dir , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : List[Any] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(SCREAMING_SNAKE_CASE__ , 'a' ) return [probe_file] if self._spark.conf.get('spark.master' , '' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: __a : List[Any] = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(SCREAMING_SNAKE_CASE__ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def __lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : datasets.download.download_manager.DownloadManager ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' import pyspark def get_arrow_batch_size(SCREAMING_SNAKE_CASE__ : int ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) __a : List[str] = self.df.count() __a : Dict = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. __a : List[str] = ( self.df.limit(SCREAMING_SNAKE_CASE__ ) .repartition(1 ) .mapInArrow(SCREAMING_SNAKE_CASE__ , 'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) __a : Dict = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. __a : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ , int(approx_total_size / max_shard_size ) ) __a : int = self.df.repartition(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , ): '''simple docstring''' import pyspark __a : Any = ParquetWriter if file_format == 'parquet' else ArrowWriter __a : Union[str, Any] = os.path.join(self._working_dir , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) if self._working_dir else fpath __a : Optional[int] = file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. __a : List[str] = self.config.features __a : int = self._writer_batch_size __a : Union[str, Any] = self._fs.storage_options def write_arrow(SCREAMING_SNAKE_CASE__ : Optional[int] ): # Within the same SparkContext, no two task attempts will share the same attempt ID. __a : Any = pyspark.TaskContext().taskAttemptId() __a : str = next(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , ) __a : Any = 0 __a : List[str] = writer_class( features=SCREAMING_SNAKE_CASE__ , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , ) __a : Optional[Any] = pa.Table.from_batches([first_batch] ) writer.write_table(SCREAMING_SNAKE_CASE__ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: __a , __a : Optional[int] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) shard_id += 1 __a : Optional[Any] = writer_class( features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , ) __a : Union[str, Any] = pa.Table.from_batches([batch] ) writer.write_table(SCREAMING_SNAKE_CASE__ ) if writer._num_bytes > 0: __a , __a : str = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(SCREAMING_SNAKE_CASE__ ) ): __a : Any = os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE__ ) , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) shutil.move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : Dict = ( self.df.mapInArrow(SCREAMING_SNAKE_CASE__ , 'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , SCREAMING_SNAKE_CASE__ : str = "arrow" , SCREAMING_SNAKE_CASE__ : Optional[Union[str, int]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ): '''simple docstring''' self._validate_cache_dir() __a : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = not is_remote_filesystem(self._fs ) __a : Optional[Any] = os.path.join if is_local else posixpath.join __a : Any = '-TTTTT-SSSSS-of-NNNNN' __a : Union[str, Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' __a : Any = path_join(self._output_dir , SCREAMING_SNAKE_CASE__ ) __a : Any = 0 __a : Dict = 0 __a : int = 0 __a : List[str] = [] __a : Optional[int] = [] for task_id, content in self._prepare_split_single(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[int] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(SCREAMING_SNAKE_CASE__ ) __a : 
List[str] = total_num_examples __a : Optional[int] = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: __a : Any = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. __a : Dict = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , ): rename( SCREAMING_SNAKE_CASE__ , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , ) __a : Union[str, Any] = [] __a : List[str] = 0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): __a , __a : Union[str, Any] = task_id_and_num_shards[i] for shard_id in range(SCREAMING_SNAKE_CASE__ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ).map(lambda SCREAMING_SNAKE_CASE__ : _rename_shard(*SCREAMING_SNAKE_CASE__ ) ).collect() else: # don't use any pattern __a : List[Any] = 0 __a : Any = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SCREAMING_SNAKE_CASE__ , '' ) , ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , ): '''simple docstring''' return SparkExamplesIterable(self.df )
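# Hedged end-to-end sketch (an assumption: `Dataset.from_spark` is the public
# entry point that drives the Spark builder above, available in recent
# `datasets` releases). Requires a local Spark session.
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.master('local[2]').getOrCreate()
df = spark.createDataFrame([(i, f'text-{i}') for i in range(100)], ['id', 'text'])

ds = Dataset.from_spark(df)  # exercises SparkConfig / SparkExamplesIterable above
print(ds[0])  # e.g. {'id': 0, 'text': 'text-0'} (row order follows partitions)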
"""simple docstring""" from __future__ import annotations from bisect import bisect_left from functools import total_ordering from heapq import merge @total_ordering class __UpperCamelCase ( __lowerCamelCase ): def __lt__( self : List[Any] , UpperCAmelCase : Tuple ) -> Dict: return self[-1] < other[-1] def __eq__( self : Optional[Any] , UpperCAmelCase : List[Any] ) -> Dict: return self[-1] == other[-1] def UpperCAmelCase ( a__ ): '''simple docstring''' lowerCAmelCase :list[Stack] = [] # sort into stacks for element in collection: lowerCAmelCase :Dict = Stack([element] ) lowerCAmelCase :Union[str, Any] = bisect_left(lowerCamelCase_ , lowerCamelCase_ ) if i != len(lowerCamelCase_ ): stacks[i].append(lowerCamelCase_ ) else: stacks.append(lowerCamelCase_ ) # use a heap-based merge to merge stack efficiently lowerCAmelCase :List[str] = merge(*(reversed(lowerCamelCase_ ) for stack in stacks) ) return collection if __name__ == "__main__": __SCREAMING_SNAKE_CASE = input('Enter numbers separated by a comma:\n').strip() __SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(',')] print(patience_sort(unsorted))
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : int ): # save results if os.path.exists(lowerCamelCase_ ): if os.path.exists(os.path.join(lowerCamelCase_ , 'config.json' ) ) and os.path.isfile( os.path.join(lowerCamelCase_ , 'config.json' ) ): os.remove(os.path.join(lowerCamelCase_ , 'config.json' ) ) if os.path.exists(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ): os.remove(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ) else: os.makedirs(lowerCamelCase_ ) model.save_pretrained(lowerCamelCase_ ) def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Any=False ): __a : Dict = 2 if unlogit: __a : Optional[Any] = torch.pow(lowerCamelCase_ , lowerCamelCase_ ) __a : Any = p * torch.log(lowerCamelCase_ ) __a : Union[str, Any] = 0 return -plogp.sum(dim=-1 ) def UpperCAmelCase__ ( lowerCamelCase_ : Any ): logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(lowerCamelCase_ ) ) ) ) for row in range(len(lowerCamelCase_ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : int=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : List[Any]=False ): __a , __a : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads __a : str = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) __a : int = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) if head_mask is None: __a : Union[str, Any] = torch.ones(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCamelCase_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: __a : Any = None __a : Optional[int] = 0.0 __a : Optional[Any] = 0.0 for step, inputs in enumerate(tqdm(lowerCamelCase_ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): __a : Dict = tuple(t.to(args.device ) for t in inputs ) ((__a) , ) : Dict = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) __a : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ , head_mask=lowerCamelCase_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) __a , __a , __a : int = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCamelCase_ ): __a : List[str] = entropy(attn.detach() , lowerCamelCase_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCamelCase_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= 
tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: __a : Optional[Any] = 2 __a : Union[str, Any] = torch.pow(torch.pow(lowerCamelCase_ , lowerCamelCase_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: __a : List[str] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(lowerCamelCase_ ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(lowerCamelCase_ ) logger.info('Head ranked by importance scores' ) __a : Optional[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) __a : str = torch.arange( head_importance.numel() , device=args.device ) __a : Tuple = head_ranks.view_as(lowerCamelCase_ ) print_ad_tensor(lowerCamelCase_ ) return attn_entropy, head_importance, total_loss def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ): __a , __a , __a : Optional[int] = compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ ) __a : Tuple = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , lowerCamelCase_ , original_score * args.masking_threshold ) __a : Tuple = torch.ones_like(lowerCamelCase_ ) __a : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) __a : Tuple = original_score while current_score >= original_score * args.masking_threshold: __a : Optional[Any] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads __a : List[str] = float('Inf' ) __a : List[Any] = head_importance.view(-1 ).sort()[1] if len(lowerCamelCase_ ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads __a : Any = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) __a : int = new_head_mask.view(-1 ) __a : Tuple = 0.0 __a : int = new_head_mask.view_as(lowerCamelCase_ ) __a : Optional[int] = new_head_mask.clone().detach() print_ad_tensor(lowerCamelCase_ ) # Compute metric and head importance again __a , __a , __a : int = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , head_mask=lowerCamelCase_ ) __a : List[Any] = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , lowerCamelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , ) logger.info('Final head mask' ) print_ad_tensor(lowerCamelCase_ ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ): __a : List[Any] = datetime.now() __a , __a , __a : List[str] = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ ) __a : List[str] = 1 / loss __a : List[Any] = datetime.now() - before_time __a : List[str] = sum(p.numel() for p in model.parameters() ) __a : Dict = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCamelCase_ ) ) } for k, v in heads_to_prune.items(): 
if isinstance(lowerCamelCase_ , lowerCamelCase_ ): __a : Tuple = [ v, ] assert sum(len(lowerCamelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowerCamelCase_ ) __a : Optional[Any] = sum(p.numel() for p in model.parameters() ) __a : Tuple = datetime.now() __a , __a , __a : Tuple = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ , actually_pruned=lowerCamelCase_ , ) __a : Optional[Any] = 1 / loss __a : List[Any] = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , lowerCamelCase_ , lowerCamelCase_ , pruned_num_params / original_num_params * 1_0_0 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , lowerCamelCase_ , lowerCamelCase_ ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 ) save_model(lowerCamelCase_ , args.output_dir ) def UpperCAmelCase__ ( ): __a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=lowerCamelCase_ , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=lowerCamelCase_ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=lowerCamelCase_ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=lowerCamelCase_ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=lowerCamelCase_ , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=lowerCamelCase_ , help='Metric to use for head masking.' 
) parser.add_argument( '--max_seq_length' , default=1_2_8 , type=lowerCamelCase_ , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=lowerCamelCase_ , help='Batch size.' ) parser.add_argument('--seed' , type=lowerCamelCase_ , default=4_2 ) parser.add_argument('--local_rank' , type=lowerCamelCase_ , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' ) __a : Optional[Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase_ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: __a : List[str] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) __a : Tuple = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) __a : Union[str, Any] = torch.device('cuda' , args.local_rank ) __a : Any = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) __a : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: __a : List[Any] = nn.parallel.DistributedDataParallel( lowerCamelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCamelCase_ ) elif args.n_gpu > 1: __a : Union[str, Any] = nn.DataParallel(lowerCamelCase_ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=lowerCamelCase_ ) torch.save(lowerCamelCase_ , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , lowerCamelCase_ ) # Prepare dataset __a : Tuple = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) __a : str = (torch.from_numpy(lowerCamelCase_ ),) __a : List[str] = TensorDataset(*lowerCamelCase_ ) __a : Optional[Any] = RandomSampler(lowerCamelCase_ ) __a : Union[str, Any] = DataLoader(lowerCamelCase_ , sampler=lowerCamelCase_ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: __a : Union[str, Any] = mask_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) prune_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
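# Hedged sketch of the public pruning entry point the script above builds on:
# `PreTrainedModel.prune_heads` takes {layer_index: [head_indices]}. The
# standard "gpt2" checkpoint is used here purely as an example (needs a download).
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained('gpt2')
model.prune_heads({0: [0, 2], 5: [1]})  # drop heads 0 and 2 of layer 0, head 1 of layer 5
print(model.config.pruned_heads)  # {0: [0, 2], 5: [1]}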
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Exact Gaussian Error Linear Unit, computed with erf."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of gelu, as used in the original GPT/BERT repos."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_aa(x):
    """gelu clipped to [-10, 10]; registered below under the key "gelu_10"."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse('2.4'):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACTaFN = {
    'gelu': gelu,
    'gelu_10': gelu_aa,
    'gelu_fast': gelu_fast,
    'gelu_new': gelu_new,
    'glu': glu,
    'mish': mish,
    'quick_gelu': quick_gelu,
    'relu': tf.keras.activations.relu,
    'sigmoid': tf.keras.activations.sigmoid,
    'silu': tf.keras.activations.swish,
    'swish': tf.keras.activations.swish,
    'tanh': tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    raise KeyError(f"function {activation_string} not found in ACTaFN mapping {list(ACTaFN.keys())}")
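# Illustrative check (an added sketch): the exact erf-based gelu and the tanh
# approximation registered above agree closely on typical inputs.
x = tf.linspace(-3.0, 3.0, 7)
exact = ACTaFN['gelu'](x)
approx = ACTaFN['gelu_new'](x)
print(tf.reduce_max(tf.abs(exact - approx)).numpy())  # small, well below 1e-2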
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('''0.8.3'''): raise Exception('''requires gluonnlp == 0.8.3''') if version.parse(mx.__version__) != version.parse('''1.5.0'''): raise Exception('''requires mxnet == 1.5.0''') logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = '''The Nymphenburg Palace is a beautiful palace in Munich!''' def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : str ): __a : List[Any] = { 'attention_cell': 'multi_head', 'num_layers': 4, 'units': 1_0_2_4, 'hidden_size': 7_6_8, 'max_length': 5_1_2, 'num_heads': 8, 'scaled': True, 'dropout': 0.1, 'use_residual': True, 'embed_size': 1_0_2_4, 'embed_dropout': 0.1, 'word_embed': None, 'layer_norm_eps': 1e-5, 'token_type_vocab_size': 2, } __a : Optional[int] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __a : List[str] = BERTEncoder( attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowerCamelCase_ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __a : int = 'openwebtext_ccnews_stories_books_cased' # Specify download folder to Gluonnlp's vocab __a : Optional[Any] = os.path.join(get_home_dir() , 'models' ) __a : Optional[Any] = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ ) __a : Any = nlp.model.BERTModel( lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , ) original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ ) __a : Dict = original_bort._collect_params_with_prefix() # Build our config 🤗 __a : Optional[Any] = { 'architectures': ['BertForMaskedLM'], 'attention_probs_dropout_prob': predefined_args['dropout'], 'hidden_act': 'gelu', 'hidden_dropout_prob': predefined_args['dropout'], 'hidden_size': predefined_args['embed_size'], 'initializer_range': 0.02, 'intermediate_size': predefined_args['hidden_size'], 'layer_norm_eps': predefined_args['layer_norm_eps'], 
'max_position_embeddings': predefined_args['max_length'], 'model_type': 'bort', 'num_attention_heads': predefined_args['num_heads'], 'num_hidden_layers': predefined_args['num_layers'], 'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa 'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa 'vocab_size': len(lowerCamelCase_ ), } __a : str = BertConfig.from_dict(lowerCamelCase_ ) __a : Optional[int] = BertForMaskedLM(lowerCamelCase_ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase_ : Optional[Any] ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] ): __a : Optional[int] = hf_param.shape __a : int = to_torch(params[gluon_param] ) __a : int = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param __a : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' ) __a : str = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' ) __a : Tuple = check_and_map_params( 
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' ) __a : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __a : Union[str, Any] = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __a : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention __a : BertSelfAttention = layer.attention.self __a : Optional[int] = check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) __a : str = check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) __a : List[str] = check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) __a : str = check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) __a : Dict = check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) __a : str = check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output __a : BertSelfOutput = layer.attention.output __a : Tuple = check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) __a : Dict = check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) __a : Optional[Any] = check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) __a : Optional[Any] = check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate __a : BertIntermediate = layer.intermediate __a : List[str] = check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) __a : Optional[Any] = check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output __a : BertOutput = layer.output __a : str = check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) __a : List[Any] = check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) __a : str = check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) __a : List[str] = check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models __a : Union[str, Any] = RobertaTokenizer.from_pretrained('roberta-base' ) __a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ )['input_ids'] # Get gluon output __a : Optional[int] = mx.nd.array([input_ids] ) __a : Tuple = original_bort(inputs=lowerCamelCase_ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase_ ) __a : Optional[Any] = BertModel.from_pretrained(lowerCamelCase_ ) hf_bort_model.eval() __a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ , return_tensors='pt' ) __a : int = hf_bort_model(**lowerCamelCase_ )[0] __a : Dict = output_gluon[0].asnumpy() __a : str = output_hf[0].detach().numpy() __a : List[Any] 
= np.max(np.abs(hf_layer - gluon_layer ) ).item()
    max_absolute_diff = __a  # give the annotated result a readable name
    success = np.allclose(gluon_layer , hf_layer , atol=1e-3 )
    if success:
        print('✔️ Both models output the same tensors' )
    else:
        print('❌ The models do **NOT** output the same tensors' )
        print('Absolute difference is:' , max_absolute_diff )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--bort_checkpoint_path' , default=None , type=str , required=True , help='Path the official Bort params file.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path' , default=None , type=str , required=True , help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    # the conversion entry point defined at the top of this file
    UpperCAmelCase__(args.bort_checkpoint_path , args.pytorch_dump_folder_path)
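# Generalized form of the shape-guarded parameter copy used by
# `check_and_map_params` above — an added sketch with made-up tensors, handy
# when porting weights between frameworks.
import numpy as np
import torch
from torch import nn


def copy_param(hf_param: nn.Parameter, source_array: np.ndarray) -> nn.Parameter:
    assert tuple(hf_param.shape) == tuple(source_array.shape), (
        f'shape mismatch: {tuple(source_array.shape)} vs {tuple(hf_param.shape)}'
    )
    return nn.Parameter(torch.as_tensor(source_array, dtype=torch.float32))


p = copy_param(nn.Parameter(torch.zeros(2, 3)), np.ones((2, 3)))
print(p.shape)  # torch.Size([2, 3])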
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class lowercase_ ( __lowerCamelCase ): """simple docstring""" __lowerCAmelCase = 42 __lowerCAmelCase = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version('''>=''', '''0.0.12''') ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline try: if not 
(is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class lowercase_ ( __lowerCamelCase ): """simple docstring""" __lowerCAmelCase = 42 __lowerCAmelCase = 42 from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
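# Minimal sketch of the optional-dependency guard pattern this __init__ uses:
# probe availability, raise a sentinel, fall back to a dummy implementation.
# `_has_k_diffusion` and `my_feature` are illustrative names, not real APIs.
def _has_k_diffusion() -> bool:
    try:
        import k_diffusion  # noqa: F401

        return True
    except ImportError:
        return False


if _has_k_diffusion():

    def my_feature():
        return 'k-diffusion backed implementation'

else:

    def my_feature():
        raise ImportError('my_feature requires the k_diffusion package')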
def apply_table(inp, table):
    """Permute/select bits of `inp` according to 1-indexed positions in `table`."""
    res = ''
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift of a bit-string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s`; row = outer bits, col = middle bits."""
    row = int('0b' + data[0] + data[-1], 2)
    col = int('0b' + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input('Enter 10 bit key: ')
    message = input('Enter 8 bit message: ')

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('Cipher text is:', CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('Plain text after decypting is:', PT)
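# Added sanity checks (a sketch, not in the original module): fixed bit-string
# cases for the permutation, shift and xor primitives defined above.
assert apply_table('10100000', [2, 6, 3, 1, 4, 8, 5, 7]) == '00110000'  # the IP table
assert left_shift('10100') == '01001'
assert xor('1100', '1010') == '0110'
assert xor(xor('1100', '1010'), '1010') == '1100'  # xor is self-inverse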
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def _A(method):
    """Run accelerate's pre-forward hook (if attached) before calling `method`."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
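# Hedged usage sketch: what the decorator above is for. `_hf_hook` is the
# attribute accelerate attaches to dispatched modules; `TinyModule` is a
# made-up stand-in.
import torch
from torch import nn


class TinyModule(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)

    @_A  # run any accelerate pre-forward hook before encode()
    def encode(self, x):
        return self.linear(x)


m = TinyModule()
print(m.encode(torch.randn(1, 4)).shape)  # torch.Size([1, 4])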
import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
"""simple docstring""" def _lowerCamelCase ( lowerCamelCase__ : list[list] ): lowercase__ : List[str] = current_set.copy() for row_index, row in enumerate(lowerCamelCase_ ): lowercase__ : Optional[int] = row[0] for column_index, column in enumerate(lowerCamelCase_ ): if magnitude == 0: lowercase__ : Optional[Any] = column continue lowercase__ : Any = column / magnitude # Subtract to cancel term lowercase__ : Optional[int] = current_set[0] lowercase__ : Union[str, Any] = [first_row] lowercase__ : str = current_set[1::] for row in current_set: lowercase__ : List[Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(lowerCamelCase_ ) continue for column_index in range(len(lowerCamelCase_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(lowerCamelCase_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: lowercase__ : int = final_set[0] lowercase__ : List[Any] = [] lowercase__ : Optional[int] = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) lowercase__ : int = simplify(lowerCamelCase_ ) for i in range(len(lowerCamelCase_ ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , lowerCamelCase_ ) lowercase__ : List[str] = resultant return final_set def _lowerCamelCase ( lowerCamelCase__ : list[list] ): if len(lowerCamelCase_ ) == 0: raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) lowercase__ : Tuple = len(lowerCamelCase_ ) + 1 if any(len(lowerCamelCase_ ) != _length for item in equations ): raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) for row in equations: if any(not isinstance(lowerCamelCase_ , (int, float) ) for column in row ): raise ValueError("""solve_simultaneous() requires lists of integers""" ) if len(lowerCamelCase_ ) == 1: return [equations[0][-1] / equations[0][0]] lowercase__ : Tuple = equations.copy() if any(0 in row for row in data_set ): lowercase__ : Union[str, Any] = data_set.copy() lowercase__ : List[str] = [] for row_index, row in enumerate(lowerCamelCase_ ): if 0 not in row: lowercase__ : List[str] = data_set.pop(lowerCamelCase_ ) break if not full_row: raise ValueError("""solve_simultaneous() requires at least 1 full equation""" ) data_set.insert(0 , lowerCamelCase_ ) lowercase__ : int = data_set.copy() lowercase__ : List[Any] = simplify(lowerCamelCase_ ) lowercase__ : List[Any] = simplified[::-1] lowercase__ : list = [] for row in simplified: lowercase__ : List[str] = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue lowercase__ : Union[str, Any] = row.copy()[: len(lowerCamelCase_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(lowerCamelCase_ ) == 0: solutions.append(0 ) continue lowercase__ : Dict = temp_row[1::] lowercase__ : Any = temp_row[::-1] for column_index, column in enumerate(lowerCamelCase_ ): current_solution -= column * solutions[column_index] solutions.append(lowerCamelCase_ ) lowercase__ : Optional[Any] = [] for item in solutions: final.append(float(round(lowerCamelCase_ , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() __snake_case = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
"""simple docstring""" import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : str = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value') __lowercase : int = ( ('layer.', 'layer_'), ('word_embeddings.weight', 'word_embeddings'), ('position_embeddings.weight', 'position_embeddings'), ('token_type_embeddings.weight', 'token_type_embeddings'), ('.', '/'), ('LayerNorm/weight', 'LayerNorm/gamma'), ('LayerNorm/bias', 'LayerNorm/beta'), ('weight', 'kernel'), ) if not os.path.isdir(lowerCamelCase_ ): os.makedirs(lowerCamelCase_ ) __lowercase : Union[str, Any] = model.state_dict() def to_tf_var_name(__UpperCamelCase ): for patt, repl in iter(lowerCamelCase_ ): __lowercase : Union[str, Any] = name.replace(lowerCamelCase_ , lowerCamelCase_ ) return f"""bert/{name}""" def create_tf_var(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : List[Any] = tf.dtypes.as_dtype(tensor.dtype ) __lowercase : List[str] = tf.get_variable(dtype=lowerCamelCase_ , shape=tensor.shape , name=lowerCamelCase_ , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(lowerCamelCase_ ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: __lowercase : Optional[Any] = to_tf_var_name(lowerCamelCase_ ) __lowercase : Optional[Any] = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): __lowercase : Union[str, Any] = torch_tensor.T __lowercase : str = create_tf_var(tensor=lowerCamelCase_ , name=lowerCamelCase_ , session=lowerCamelCase_ ) tf.keras.backend.set_value(lowerCamelCase_ , lowerCamelCase_ ) __lowercase : Union[str, Any] = session.run(lowerCamelCase_ ) print(f"""Successfully created {tf_name}: {np.allclose(lowerCamelCase_ , lowerCamelCase_ )}""" ) __lowercase : Any = tf.train.Saver(tf.trainable_variables() ) saver.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) ) def __UpperCAmelCase ( __UpperCamelCase=None ): __lowercase : str = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''model name e.g. bert-base-uncased''' ) parser.add_argument( '''--cache_dir''' , type=lowerCamelCase_ , default=lowerCamelCase_ , required=lowerCamelCase_ , help='''Directory containing pytorch model''' ) parser.add_argument('''--pytorch_model_path''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''/path/to/<pytorch-model-name>.bin''' ) parser.add_argument('''--tf_cache_dir''' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='''Directory in which to save tensorflow model''' ) __lowercase : List[Any] = parser.parse_args(lowerCamelCase_ ) __lowercase : Optional[Any] = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=lowerCamelCase_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
import argparse
from collections import OrderedDict
from pathlib import Path

import torch

from transformers import (
    VisualBertConfig,
    VisualBertForMultipleChoice,
    VisualBertForPreTraining,
    VisualBertForQuestionAnswering,
    VisualBertForVisualReasoning,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self ) -> Dict: '''simple docstring''' snake_case_ : Union[str, Any] = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" ) snake_case_ : Union[str, Any] = tf.convert_to_tensor( [[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !" snake_case_ : str = model(SCREAMING_SNAKE_CASE__ )['last_hidden_state'] snake_case_ : List[Any] = tf.TensorShape((1, 1_0, 7_6_8) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ ) # compare the actual values for a slice. snake_case_ : str = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search

from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
            """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    # test one model to quickly (no @slow) sanity check, testing the rest @slow
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    # any extra models should go into the list here - can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) and 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
            """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available


if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
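# Editor's note (an assumption, not part of the original module): a minimal sketch of how
# the tables above are typically consumed — mapping a data file's extension to a packaged
# builder module name plus its default builder kwargs. `infer_module_for_file` is a
# hypothetical helper written only for illustration, not a public `datasets` API, and the
# import below targets an internal name that may change between versions.
import os

from datasets.packaged_modules import _EXTENSION_TO_MODULE  # internal name, assumed stable here


def infer_module_for_file(path: str):
    _, ext = os.path.splitext(path)
    # e.g. ".tsv" -> ("csv", {"sep": "\t"}); returns None for unknown extensions
    return _EXTENSION_TO_MODULE.get(ext)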
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE = { 'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json', 'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json', } class __UpperCamelCase ( __lowerCamelCase ): lowercase_ : List[str] = '''falcon''' lowercase_ : Optional[Any] = ['''past_key_values'''] def __init__( self : List[Any] , UpperCAmelCase : Any=6_5024 , UpperCAmelCase : Any=4544 , UpperCAmelCase : Any=32 , UpperCAmelCase : List[str]=71 , UpperCAmelCase : int=1e-5 , UpperCAmelCase : Union[str, Any]=0.0_2 , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : List[str]=False , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : List[Any]=False , UpperCAmelCase : str=11 , UpperCAmelCase : Union[str, Any]=11 , **UpperCAmelCase : Union[str, Any] , ) -> Any: lowerCAmelCase :Any = vocab_size # Backward compatibility with n_embed kwarg lowerCAmelCase :int = kwargs.pop('n_embed' , SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :str = hidden_size if n_embed is None else n_embed lowerCAmelCase :Any = num_hidden_layers lowerCAmelCase :Dict = num_attention_heads lowerCAmelCase :Union[str, Any] = layer_norm_epsilon lowerCAmelCase :List[str] = initializer_range lowerCAmelCase :Optional[Any] = use_cache lowerCAmelCase :List[str] = hidden_dropout lowerCAmelCase :Dict = attention_dropout lowerCAmelCase :Any = bos_token_id lowerCAmelCase :Optional[int] = eos_token_id lowerCAmelCase :List[Any] = num_attention_heads if num_kv_heads is None else num_kv_heads lowerCAmelCase :int = alibi lowerCAmelCase :Tuple = new_decoder_architecture lowerCAmelCase :str = multi_query # Ignored when new_decoder_architecture is True lowerCAmelCase :Any = parallel_attn lowerCAmelCase :Dict = bias super().__init__(bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple: return self.hidden_size // self.num_attention_heads @property def UpperCAmelCase__ ( self : Tuple ) -> Tuple: return not self.alibi
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year using Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCAmelCase : Tuple = logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] = {'''vocab_file''': '''spiece.model'''} _UpperCAmelCase : int = { '''vocab_file''': { '''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''', '''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''', } } _UpperCAmelCase : List[Any] = { '''AI-Sweden/gpt-sw3-126m''': 20_48, '''AI-Sweden/gpt-sw3-350m''': 20_48, '''AI-Sweden/gpt-sw3-1.6b''': 20_48, '''AI-Sweden/gpt-sw3-6.7b''': 20_48, '''AI-Sweden/gpt-sw3-20b''': 20_48, } class lowercase_ ( __lowerCamelCase ): """simple docstring""" __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any], UpperCamelCase__ : List[str], UpperCamelCase__ : Optional[Any]=False, UpperCamelCase__ : Optional[int]=False, UpperCamelCase__ : Optional[int]=False, UpperCamelCase__ : str=None, UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Optional[int]=None, UpperCamelCase__ : List[str]=None, UpperCamelCase__ : Optional[Dict[str, Any]] = None, **UpperCamelCase__ : List[Any], ) -> List[Any]: _A = {} if sp_model_kwargs is None else sp_model_kwargs _A = kwargs.get('name_or_path' ) if name_or_path is None: logger.warning( 'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,' ' you are testing the model, this can safely be ignored' ) _A = 'None' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing _A = '<|endoftext|>' if eos_token is None else eos_token _A = '<unk>' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: _A = unk_token if pad_token is None else pad_token _A = eos_token if bos_token is None else bos_token else: _A = '<pad>' if pad_token is None else pad_token _A = '<s>' if bos_token is None else bos_token super().__init__( do_lower_case=SCREAMING_SNAKE_CASE__, remove_space=SCREAMING_SNAKE_CASE__, keep_accents=SCREAMING_SNAKE_CASE__, bos_token=SCREAMING_SNAKE_CASE__, eos_token=SCREAMING_SNAKE_CASE__, unk_token=SCREAMING_SNAKE_CASE__, pad_token=SCREAMING_SNAKE_CASE__, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE__, ) _A = do_lower_case _A = remove_space _A = keep_accents _A = vocab_file _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(SCREAMING_SNAKE_CASE__ ) # Used for whitespace normalization in input texts # fmt : off _A = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '„'} # fmt : on # Regular expression to remove non-printing characters (e.g. 
some unicode control chars) in preprocessing _A = re.compile( f'[{"".join(map(SCREAMING_SNAKE_CASE__, list(range(0, 9 ) ) + list(range(11, 32 ) ) + list(range(1_27, 1_60 ) ) + [1_60, 1_73, 82_03] ) )}]' ) def __getstate__( self : Optional[int] ) -> Optional[int]: _A = self.__dict__.copy() _A = None return state def __setstate__( self : Any, UpperCamelCase__ : int ) -> List[Any]: _A = d # for backward compatibility if not hasattr(self, 'sp_model_kwargs' ): _A = {} _A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: return len(self.sp_model ) def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : str ) -> Union[str, Any]: _A = self.non_printing_characters_re.sub('', SCREAMING_SNAKE_CASE__ ) # Normalize whitespaces _A = ''.join([char if char not in self.whitespaces else ' ' for char in text] ) # NFC Unicode normalization _A = unicodedata.normalize('NFC', SCREAMING_SNAKE_CASE__ ) return text def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : str, **UpperCamelCase__ : str ) -> int: _A = self.preprocess_text(SCREAMING_SNAKE_CASE__ ) return self.sp_model.encode(SCREAMING_SNAKE_CASE__, out_type=SCREAMING_SNAKE_CASE__ ) def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : str ) -> Optional[Any]: return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ ) def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : int ) -> Optional[Any]: return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ ) @staticmethod def __UpperCAmelCase ( UpperCamelCase__ : str ) -> List[Any]: return out_string def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : List[str] ) -> Tuple: _A = [] _A = '' _A = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token _A = True _A = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE__ ) _A = False out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) return out_string def __UpperCAmelCase ( self : Dict ) -> List[Any]: _A = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : str, UpperCamelCase__ : Optional[str] = None ) -> Any: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _A = os.path.join( SCREAMING_SNAKE_CASE__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__, 'wb' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,) def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : Union[str, List[str]], UpperCamelCase__ : Union[str, bool] = False ) -> Optional[int]: if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): _A = 
self.preprocess_text(SCREAMING_SNAKE_CASE__ ) _A = self.sp_model.encode(SCREAMING_SNAKE_CASE__ ) else: _A = [self.preprocess_text(SCREAMING_SNAKE_CASE__ ) for t in text] _A = self.sp_model.encode(SCREAMING_SNAKE_CASE__ ) if return_tensors is True or return_tensors == "pt": _A = torch.tensor(SCREAMING_SNAKE_CASE__ ) return token_ids def __UpperCAmelCase ( self : str, UpperCamelCase__ : Union[int, List[int]] ) -> int: return self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : "Conversation" ) -> Tuple: _A = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()] _A = ( f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(SCREAMING_SNAKE_CASE__ ) + f'{self.bos_token}Bot:' ) return self.encode(text=SCREAMING_SNAKE_CASE__ )
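# Hypothetical usage sketch for the tokenizer defined above. `GPTSw3Tokenizer`
# is the assumed public name (the class name in this dump is mangled); the
# checkpoint id is taken from the PRETRAINED_VOCAB_FILES_MAP keys.
from transformers import GPTSw3Tokenizer

tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
ids = tokenizer("Träd är fina", return_tensors="pt")["input_ids"]
print(tokenizer.decode(ids[0]))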
107
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : int = (DDIMParallelScheduler,) __SCREAMING_SNAKE_CASE : Union[str, Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50)) def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' __a : List[Any] = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**SCREAMING_SNAKE_CASE__ ) return config def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' __a : Tuple = self.scheduler_classes[0] __a : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ ) __a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ ) __a , __a : List[str] = 1_0, 0.0 __a : Dict = self.dummy_model() __a : str = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) for t in scheduler.timesteps: __a : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : List[str] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample return sample def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' for timesteps in [1_0_0, 5_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = self.scheduler_classes[0] __a : List[str] = self.get_scheduler_config(steps_offset=1 ) __a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : int ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : int ): '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str ): '''simple docstring''' self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' for t in [1, 1_0, 4_9]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ) def 
__lowerCAmelCase ( self : List[str] ): '''simple docstring''' for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ): self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' __a : List[str] = self.scheduler_classes[0] __a : Union[str, Any] = self.get_scheduler_config() __a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5 def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' __a : List[str] = self.scheduler_classes[0] __a : List[str] = self.get_scheduler_config() __a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) __a , __a : Any = 1_0, 0.0 scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) __a : List[Any] = self.dummy_model() __a : int = self.dummy_sample_deter __a : List[Any] = self.dummy_sample_deter + 0.1 __a : List[str] = self.dummy_sample_deter - 0.1 __a : Optional[Any] = samplea.shape[0] __a : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 ) __a : Union[str, Any] = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ ) __a : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) __a : int = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE__ ) __a : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2 assert abs(result_mean.item() - 0.4_982 ) < 1e-3 def __lowerCAmelCase ( self : str ): '''simple docstring''' __a : List[str] = self.full_loop() __a : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 172.0_067 ) < 1e-2 assert abs(result_mean.item() - 0.223_967 ) < 1e-3 def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' __a : Optional[int] = self.full_loop(prediction_type='v_prediction' ) __a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 52.5_302 ) < 1e-2 assert abs(result_mean.item() - 0.0_684 ) < 1e-3 def __lowerCAmelCase ( self : int ): '''simple docstring''' __a : Union[str, Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 ) __a : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 149.8_295 ) < 1e-2 assert abs(result_mean.item() - 0.1_951 ) < 1e-3 def __lowerCAmelCase ( self : str ): '''simple docstring''' __a : Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 ) __a : str = 
torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 149.0_784 ) < 1e-2 assert abs(result_mean.item() - 0.1_941 ) < 1e-3
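# A minimal sketch of the denoising loop these tests exercise. The scheduler
# name and the step(...).prev_sample API are from diffusers; the random sample
# and the all-zero "prediction" are stand-ins for `dummy_sample_deter` and a
# real model forward pass.
import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample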
47
0
"""Project Euler problem 48 ("Self powers"): find the last ten digits of
1**1 + 2**2 + ... + 1000**1000."""


def solution() -> str:
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
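# An equivalent sketch of the same sum via modular exponentiation, so the huge
# intermediate powers are never materialised; `solution` is the function above.
MOD = 10**10
assert str(sum(pow(i, i, MOD) for i in range(1, 1_001)) % MOD).zfill(10) == solution()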
372
def solve_simultaneous_equations(
    equation_1: list[int], equation_2: list[int]
) -> tuple[float, float]:
    """Solve a pair of linear equations a*x + b*y = c, each given as [a, b, c],
    using Cramer's rule."""
    # Check if the input is valid
    if not (len(equation_1) == len(equation_2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation_1[0] == equation_1[1] == equation_2[0] == equation_2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a_1, b_1, c_1 = equation_1
    a_2, b_2, c_2 = equation_2

    # Calculate the determinants of the matrices
    determinant = a_1 * b_2 - a_2 * b_1
    determinant_x = c_1 * b_2 - c_2 * b_1
    determinant_y = a_1 * c_2 - a_2 * c_1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution: both x and y are zero (consistent system)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-trivial solution (consistent system)
    return (x, y)
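# Hypothetical usage of the solver above: 4x + 2y = 14 and 3x - y = 3 have the
# unique solution x = 2, y = 3.
print(solve_simultaneous_equations([4, 2, 14], [3, -1, 3]))  # (2.0, 3.0)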
47
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = '▁' __snake_case = {'vocab_file': 'sentencepiece.bpe.model'} __snake_case = { 'vocab_file': { 'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model', } } __snake_case = { 'facebook/xglm-564M': 2048, } class _SCREAMING_SNAKE_CASE ( __lowerCamelCase ): """simple docstring""" _a : Optional[Any] = VOCAB_FILES_NAMES _a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP _a : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a : Any = ['''input_ids''', '''attention_mask'''] def __init__( self , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> Optional[int]: lowercase__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer lowercase__ : Any = 7 lowercase__ : Union[str, Any] = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )] lowercase__ : Union[str, Any] = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , ) lowercase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) ) lowercase__ : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase__ : Any = 1 # Mimic fairseq token-to-id alignment for the first 4 token lowercase__ : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} lowercase__ : List[str] = len(self.sp_model ) lowercase__ : Optional[int] = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ ) lowercase__ : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Dict: lowercase__ : Tuple = self.__dict__.copy() lowercase__ : List[str] = None lowercase__ : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self , lowerCamelCase__ ) -> str: lowercase__ : int = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): lowercase__ : Dict = {} lowercase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple: if token_ids_a is None: return [self.sep_token_id] + token_ids_a lowercase__ : Optional[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> List[str]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> int: lowercase__ : Optional[int] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def UpperCAmelCase__( self ) -> Tuple: return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def UpperCAmelCase__( self ) -> int: lowercase__ : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase__( self , lowerCamelCase__ ) -> Dict: return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ ) def UpperCAmelCase__( self , lowerCamelCase__ ) -> List[str]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase__ : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase__( self , lowerCamelCase__ ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[int]: lowercase__ : Optional[int] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , """ """ ).strip() return out_string def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> str: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase__ : Any = os.path.join( 
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as fi: lowercase__ : List[Any] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
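# Worked instance of the fairseq/SentencePiece alignment tabulated above: the
# first "real" piece "," sits at spm id 3 but fairseq id 4, so every plain
# sentencepiece id is shifted by fairseq_offset = 1, while the four specials
# come from the hand-written fairseq_tokens_to_ids table.
fairseq_offset = 1
spm_id_comma = 3
assert spm_id_comma + fairseq_offset == 4  # fairseq id of ","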
200
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa: F403
else:
    from .continous_encoder import SpectrogramContEncoder
    from .notes_encoder import SpectrogramNotesEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa: F403
else:
    from .midi_utils import MidiProcessor
47
0
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    a[end], a[pivot] = a[pivot], a[end]
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print("No of Comparisons for 100 elements selected from a standard normal distribution is :")
print(z)
250
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__ = { '''configuration_bridgetower''': [ '''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BridgeTowerConfig''', '''BridgeTowerTextConfig''', '''BridgeTowerVisionConfig''', ], '''processing_bridgetower''': ['''BridgeTowerProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''BridgeTowerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BridgeTowerForContrastiveLearning''', '''BridgeTowerForImageAndTextRetrieval''', '''BridgeTowerForMaskedLM''', '''BridgeTowerModel''', '''BridgeTowerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
47
0
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html a_ = 'platform' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ): if attention_mask is None: __lowercase : Union[str, Any] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: __lowercase : Dict = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: __lowercase : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __lowercase : Dict = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __lowercase : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class UpperCAmelCase_ : def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=99 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=4 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=0.0_2 , ) -> Tuple: __lowercase : Dict = parent __lowercase : Optional[int] = batch_size __lowercase : int = seq_length __lowercase : Any = is_training __lowercase : Tuple = use_labels __lowercase : Dict = vocab_size __lowercase : List[str] = hidden_size __lowercase : Optional[Any] = num_hidden_layers __lowercase : List[Any] = num_attention_heads __lowercase : Optional[Any] = intermediate_size __lowercase : List[str] = hidden_act __lowercase : Tuple = hidden_dropout_prob __lowercase : Dict = attention_probs_dropout_prob __lowercase : Dict = max_position_embeddings __lowercase : str = eos_token_id __lowercase : str = pad_token_id __lowercase : Optional[Any] = bos_token_id __lowercase : Union[str, Any] = initializer_range def _lowerCamelCase ( self ) -> List[str]: __lowercase : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) __lowercase : Dict = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) __lowercase : Optional[Any] = shift_tokens_right(SCREAMING_SNAKE_CASE__ , 1 , 2 ) __lowercase : str = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , 
encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE__ , ) __lowercase : Optional[int] = prepare_blenderbot_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, inputs_dict def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : Tuple = self.prepare_config_and_inputs() return config, inputs_dict def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]: __lowercase : Union[str, Any] = 20 __lowercase : int = model_class_name(SCREAMING_SNAKE_CASE__ ) __lowercase : str = model.encode(inputs_dict['''input_ids'''] ) __lowercase : Tuple = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) __lowercase : List[str] = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowercase : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) __lowercase : Dict = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowercase : Dict = model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , ) __lowercase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) __lowercase : Dict = model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE__ , ) __lowercase : Union[str, Any] = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int: __lowercase : Optional[Any] = 20 __lowercase : List[str] = model_class_name(SCREAMING_SNAKE_CASE__ ) __lowercase : Optional[Any] = model.encode(inputs_dict['''input_ids'''] ) __lowercase : Any = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) __lowercase : Any = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __lowercase : Dict = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowercase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __lowercase : List[str] = model.decode( decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , ) __lowercase : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) 
__lowercase : int = model.decode( decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , ) __lowercase : Dict = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ ) __lowercase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" ) @require_flax class UpperCAmelCase_ ( unittest.TestCase ): UpperCamelCase =99 def _lowerCamelCase ( self ) -> int: __lowercase : Dict = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) __lowercase : Any = input_ids.shape[0] __lowercase : List[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def _lowerCamelCase ( self ) -> Dict: __lowercase : List[Any] = self._get_config_and_data() __lowercase : str = FlaxBlenderbotForConditionalGeneration(SCREAMING_SNAKE_CASE__ ) __lowercase : List[Any] = lm_model(input_ids=SCREAMING_SNAKE_CASE__ ) __lowercase : Tuple = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , SCREAMING_SNAKE_CASE__ ) def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : List[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) __lowercase : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(SCREAMING_SNAKE_CASE__ ) __lowercase : Tuple = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) __lowercase : Optional[Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) __lowercase : Optional[int] = lm_model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ ) __lowercase : List[Any] = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , SCREAMING_SNAKE_CASE__ ) def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : int = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) __lowercase : Optional[int] = shift_tokens_right(SCREAMING_SNAKE_CASE__ , 1 , 2 ) __lowercase : List[str] = np.equal(SCREAMING_SNAKE_CASE__ , 1 ).astype(np.floataa ).sum() __lowercase : Any = np.equal(SCREAMING_SNAKE_CASE__ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(SCREAMING_SNAKE_CASE__ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase , __lowerCamelCase ): UpperCamelCase =True UpperCamelCase =( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) UpperCamelCase 
=(FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def _lowerCamelCase ( self ) -> Optional[int]: __lowercase : List[Any] = FlaxBlenderbotModelTester(self ) def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : Any = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _lowerCamelCase ( self ) -> Any: __lowercase : Tuple = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _lowerCamelCase ( self ) -> str: __lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase : List[str] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowercase : List[Any] = model_class(SCREAMING_SNAKE_CASE__ ) @jax.jit def encode_jitted(UpperCamelCase_ , UpperCamelCase_=None , **UpperCamelCase_ ): return model.encode(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ) with self.subTest('''JIT Enabled''' ): __lowercase : Dict = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __lowercase : Dict = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertEqual(jitted_output.shape , output.shape ) def _lowerCamelCase ( self ) -> int: __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __lowercase : Tuple = model_class(SCREAMING_SNAKE_CASE__ ) __lowercase : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) __lowercase : Optional[int] = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): return model.decode( decoder_input_ids=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , encoder_outputs=SCREAMING_SNAKE_CASE__ , ) with self.subTest('''JIT Enabled''' ): __lowercase : List[Any] = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __lowercase : str = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowerCamelCase ( self ) -> Optional[int]: for model_class_name in self.all_model_classes: __lowercase : List[Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids __lowercase : List[Any] = np.ones((1, 1) ) * model.config.eos_token_id __lowercase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def 
_lowerCamelCase ( self ) -> List[Any]: __lowercase : int = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25} __lowercase : Dict = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True} __lowercase : Dict = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=SCREAMING_SNAKE_CASE__ ) __lowercase : List[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) __lowercase : Tuple = ['Sam'] __lowercase : int = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='''jax''' ) __lowercase : Any = model.generate(**SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowercase : Union[str, Any] = 'Sam is a great name. It means "sun" in Gaelic.' __lowercase : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) assert generated_txt[0].strip() == tgt_text
76
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
0
"""simple docstring""" __lowerCAmelCase : int = '''Tobias Carryer''' from time import time class _lowerCAmelCase : """simple docstring""" def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase=int(time() ) ) -> List[str]: # noqa: B008 '''simple docstring''' snake_case_ : Union[str, Any] = multiplier snake_case_ : List[Any] = increment snake_case_ : Dict = modulo snake_case_ : Dict = seed def UpperCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ : Union[str, Any] = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. __lowerCAmelCase : Dict = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31) while True: print(lcg.next_number())
58
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = '''sew-d''' def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Dict=3_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_6_8 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : str=3_0_7_2 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : List[str]=2_5_6 , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=("p2c", "c2p") , SCREAMING_SNAKE_CASE__ : str="layer_norm" , SCREAMING_SNAKE_CASE__ : Tuple="gelu_python" , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.02 , SCREAMING_SNAKE_CASE__ : int=1e-7 , SCREAMING_SNAKE_CASE__ : Any=1e-5 , SCREAMING_SNAKE_CASE__ : Optional[int]="group" , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , SCREAMING_SNAKE_CASE__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : str=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Optional[int]=1_2_8 , SCREAMING_SNAKE_CASE__ : Tuple=1_6 , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[Any]=0.05 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : int=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : Optional[int]="mean" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]=False , SCREAMING_SNAKE_CASE__ : str=2_5_6 , SCREAMING_SNAKE_CASE__ : str=0 , SCREAMING_SNAKE_CASE__ : List[Any]=1 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , **SCREAMING_SNAKE_CASE__ : Any , ): '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ ) __a : Optional[int] = hidden_size __a : Optional[Any] = feat_extract_norm __a : List[str] = feat_extract_activation __a : Dict = list(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ ) __a : List[str] = list(SCREAMING_SNAKE_CASE__ ) __a : int = conv_bias __a : Tuple = num_conv_pos_embeddings __a : List[str] = num_conv_pos_embedding_groups __a : Optional[Any] = len(self.conv_dim ) __a : Union[str, Any] = num_hidden_layers __a : Optional[Any] = intermediate_size __a : Union[str, Any] = squeeze_factor __a : List[Any] = max_position_embeddings __a : Tuple = position_buckets __a : Optional[int] = share_att_key __a : List[str] = relative_attention __a : Any = norm_rel_ebd __a : Any = list(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = hidden_act __a : str = num_attention_heads __a : Union[str, Any] = hidden_dropout __a : Optional[int] = attention_dropout 
__a : List[str] = activation_dropout __a : int = feat_proj_dropout __a : int = final_dropout __a : Dict = layer_norm_eps __a : Tuple = feature_layer_norm_eps __a : str = initializer_range __a : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __a : Tuple = apply_spec_augment __a : Optional[Any] = mask_time_prob __a : Any = mask_time_length __a : List[str] = mask_time_min_masks __a : List[str] = mask_feature_prob __a : Tuple = mask_feature_length __a : Any = mask_feature_min_masks # ctc loss __a : Optional[int] = ctc_loss_reduction __a : List[Any] = ctc_zero_infinity # sequence classification __a : Dict = use_weighted_layer_sum __a : Optional[Any] = classifier_proj_size @property def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
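# Sketch of what the closing property above computes: the product of the
# per-layer convolution strides is the overall downsampling factor of the
# feature extractor. With the default strides from the signature above:
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, conv_stride, 1) == 320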
47
0
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients are ordered from the constant term up to the leading term."""
        if len(coefficients) != degree + 1:
            raise ValueError("The number of coefficients should be equal to the degree + 1.")
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_2.coefficients[j]
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
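# Hypothetical usage of the Polynomial class above (coefficients run from the
# constant term up):
p = Polynomial(2, [1, 2, 3])  # 3x^2 + 2x + 1
print(p)                      # 3x^2 + 2x + 1
print(p.derivative())         # 6x + 2
print(p.evaluate(2))          # 3*4 + 2*2 + 1 = 17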
436
from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar SCREAMING_SNAKE_CASE__ = TypeVar('''T''') def UpperCAmelCase__ ( lowerCamelCase_ : int ): return (position - 1) // 2 def UpperCAmelCase__ ( lowerCamelCase_ : int ): return (2 * position) + 1 def UpperCAmelCase__ ( lowerCamelCase_ : int ): return (2 * position) + 2 class _UpperCamelCase( Generic[T] ): def __init__( self : List[str] ): '''simple docstring''' __a : list[tuple[T, int]] = [] __a : dict[T, int] = {} __a : int = 0 def __len__( self : Any ): '''simple docstring''' return self.elements def __repr__( self : Any ): '''simple docstring''' return str(self.heap ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' return self.elements == 0 def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' self.heap.append((elem, weight) ) __a : List[Any] = self.elements self.elements += 1 self._bubble_up(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __a , __a : Union[str, Any] = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __a , __a : Dict = self.heap[0] self._bubble_down(SCREAMING_SNAKE_CASE__ ) return elem def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : List[Any] = self.position_map[elem] __a : str = (elem, weight) if position > 0: __a : Tuple = get_parent_position(SCREAMING_SNAKE_CASE__ ) __a , __a : Dict = self.heap[parent_position] if parent_weight > weight: self._bubble_up(SCREAMING_SNAKE_CASE__ ) else: self._bubble_down(SCREAMING_SNAKE_CASE__ ) else: self._bubble_down(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T ): '''simple docstring''' __a : List[Any] = self.position_map[elem] if curr_pos == 0: return None __a : List[str] = get_parent_position(SCREAMING_SNAKE_CASE__ ) __a , __a : str = self.heap[curr_pos] __a , __a : Optional[int] = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_up(SCREAMING_SNAKE_CASE__ ) return None def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : T ): '''simple docstring''' __a : int = self.position_map[elem] __a , __a : Optional[Any] = self.heap[curr_pos] __a : Tuple = get_child_left_position(SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = get_child_right_position(SCREAMING_SNAKE_CASE__ ) if child_left_position < self.elements and child_right_position < self.elements: __a , __a : str = self.heap[child_left_position] __a , __a : List[str] = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) if child_left_position < self.elements: __a , __a : Any = self.heap[child_left_position] if child_left_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) else: return None if child_right_position < self.elements: __a , __a : Union[str, Any] = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return self._bubble_down(SCREAMING_SNAKE_CASE__ ) return None def __lowerCAmelCase ( self : 
List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : Optional[Any] = self.heap[nodea_pos][0] __a : str = self.heap[nodea_pos][0] __a , __a : int = ( self.heap[nodea_pos], self.heap[nodea_pos], ) __a : str = nodea_pos __a : Optional[int] = nodea_pos class _UpperCamelCase( Generic[T] ): def __init__( self : List[Any] ): '''simple docstring''' __a : dict[T, dict[T, int]] = {} __a : int = 0 def __repr__( self : Tuple ): '''simple docstring''' return str(self.connections ) def __len__( self : Dict ): '''simple docstring''' return self.nodes def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : T ): '''simple docstring''' if node not in self.connections: __a : Tuple = {} self.nodes += 1 def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : T , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' self.add_node(SCREAMING_SNAKE_CASE__ ) self.add_node(SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = weight __a : Any = weight def UpperCAmelCase__ ( lowerCamelCase_ : GraphUndirectedWeighted[T] , ): __a : dict[T, int] = {node: maxsize for node in graph.connections} __a : dict[T, T | None] = {node: None for node in graph.connections} __a : MinPriorityQueue[T] = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(lowerCamelCase_ , lowerCamelCase_ ) if priority_queue.is_empty(): return dist, parent # initialization __a : Optional[int] = priority_queue.extract_min() __a : int = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __a : str = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowerCamelCase_ , dist[neighbour] ) __a : Optional[int] = node # running prim's algorithm while not priority_queue.is_empty(): __a : Any = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __a : Tuple = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(lowerCamelCase_ , dist[neighbour] ) __a : Dict = node return dist, parent
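# Hypothetical usage of the code above; `GraphUndirectedWeighted` and
# `prims_algo` are assumed de-obfuscated names for the mangled identifiers.
graph = GraphUndirectedWeighted()
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("c", "a", 5)
dist, parent = prims_algo(graph)
print(parent)  # expected: b and c both hang off a, since weights 3 and 5 beat 10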
47
0
'''simple docstring''' from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES _lowercase : Tuple = logging.get_logger(__name__) _lowercase : Tuple = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) _lowercase : List[Any] = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) _lowercase : Optional[Any] = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) _lowercase : List[Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", 
"""FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) _lowercase : Tuple = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) _lowercase : Optional[int] = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) _lowercase : Any = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) _lowercase : List[str] = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) _lowercase : Optional[Any] = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) _lowercase : str = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) _lowercase : List[Any] = 
OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) _lowercase : Dict = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) _lowercase : Optional[Any] = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) _lowercase : str = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) _lowercase : Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) _lowercase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) _lowercase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) _lowercase : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) _lowercase : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) _lowercase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) _lowercase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) _lowercase : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) _lowercase : Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) _lowercase : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) _lowercase : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) _lowercase : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) _lowercase : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) _lowercase : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : str = FLAX_MODEL_MAPPING _lowercase : Union[str, Any] = auto_class_update(FlaxAutoModel) class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : Tuple = FLAX_MODEL_FOR_PRETRAINING_MAPPING _lowercase : Union[str, Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : Tuple = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING _lowercase : Any = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING _lowercase : Any = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : List[str] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _lowercase : List[Any] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class UpperCamelCase__( 
_BaseAutoModelClass ): __magic_name__ : int = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _lowercase : Any = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : int = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING _lowercase : Dict = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : Any = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING _lowercase : int = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : Any = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING _lowercase : Optional[int] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING _lowercase : List[Any] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : str = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _lowercase : Dict = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : List[str] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING _lowercase : int = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class UpperCamelCase__( _BaseAutoModelClass ): __magic_name__ : List[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING _lowercase : int = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
210
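The cell above wires model-type strings to Flax class names and resolves them lazily through _LazyAutoMapping so that nothing is imported until first use. A minimal sketch of that deferred-import idea, assuming only the standard library (this is illustrative and not the actual _LazyAutoMapping implementation):

import importlib
from collections import OrderedDict

class LazyModelMapping:
    """Map a config model_type to a model class, importing only on first access."""
    def __init__(self, names: OrderedDict, package: str = "transformers"):
        self._names = names        # e.g. OrderedDict([("bert", "FlaxBertModel")])
        self._package = package
        self._cache = {}
    def __getitem__(self, model_type: str):
        if model_type not in self._cache:
            # defer the heavy import until a class is actually requested
            module = importlib.import_module(self._package)
            self._cache[model_type] = getattr(module, self._names[model_type])
        return self._cache[model_type]

# usage (assumes transformers with the Flax extras installed):
# mapping = LazyModelMapping(OrderedDict([("bert", "FlaxBertModel")]))
# cls = mapping["bert"]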
from collections.abc import Sequence from queue import Queue class _UpperCamelCase: def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None ): '''simple docstring''' __a : Tuple = start __a : Dict = end __a : List[str] = val __a : List[Any] = (start + end) // 2 __a : Optional[Any] = left __a : List[str] = right def __repr__( self : Dict ): '''simple docstring''' return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})''' class _UpperCamelCase: def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Sequence , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' __a : Tuple = collection __a : Dict = function if self.collection: __a : int = self._build_tree(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 ) def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' self._update_tree(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' return self._query_range(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' if start == end: return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.collection[start] ) __a : Tuple = (start + end) // 2 __a : Optional[int] = self._build_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : Tuple = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE__ ) return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' if node.start == i and node.end == i: __a : Optional[Any] = val return if i <= node.mid: self._update_tree(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: self._update_tree(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : int = self.fn(node.left.val , node.right.val ) def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , SCREAMING_SNAKE_CASE__ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE__ ) , ) else: # range in right child tree return self._query_range(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' if self.root is not None: __a : Tuple = Queue() queue.put(self.root ) while not queue.empty(): __a : Tuple = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('''*''' * 50) SCREAMING_SNAKE_CASE__ = SegmentTree([2, 
1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
47
0
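The style_context above is a pointer-based segment tree with point update and inclusive range query. An equivalent array-based variant is often simpler; the sketch below assumes a commutative, associative fn (add, max, min, as in the cell's demo) and mirrors the same update/query API:

class ArraySegmentTree:
    """Iterative segment tree over a list, parameterised by an associative fn."""
    def __init__(self, data, fn):
        self.n = len(data)
        self.fn = fn
        # leaves live at indices [n, 2n); internal node i covers 2i and 2i+1
        self.tree = [None] * self.n + list(data)
        for i in range(self.n - 1, 0, -1):
            self.tree[i] = fn(self.tree[2 * i], self.tree[2 * i + 1])
    def update(self, i, value):
        i += self.n
        self.tree[i] = value
        while i > 1:  # recompute ancestors up to the root
            i //= 2
            self.tree[i] = self.fn(self.tree[2 * i], self.tree[2 * i + 1])
    def query(self, left, right):  # inclusive bounds, like the cell's query_range
        left += self.n
        right += self.n + 1
        acc = None
        while left < right:
            if left & 1:
                acc = self.tree[left] if acc is None else self.fn(acc, self.tree[left])
                left += 1
            if right & 1:
                right -= 1
                acc = self.tree[right] if acc is None else self.fn(acc, self.tree[right])
            left //= 2
            right //= 2
        return acc

# usage, mirroring the demo in the cell (import operator first):
# tree = ArraySegmentTree([2, 1, 5, 3, 4], operator.add)
# tree.update(1, 5); tree.query(1, 3)  # -> 13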
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase : Union[str, Any] = { "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Any = [ "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST", "PegasusXForConditionalGeneration", "PegasusXModel", "PegasusXPreTrainedModel", ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys lowerCamelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
import os import posixpath import shutil import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int SCREAMING_SNAKE_CASE__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _UpperCamelCase( datasets.BuilderConfig ): __SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None def UpperCAmelCase__ ( lowerCamelCase_ : "pyspark.sql.DataFrame" , lowerCamelCase_ : List[int] , ): import pyspark def generate_fn(): __a : List[Any] = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: __a : Optional[int] = df_with_partition_id.select('*' ).where(f'''part_id = {partition_id}''' ).drop('part_id' ) __a : Optional[Any] = partition_df.collect() __a : Union[str, Any] = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class _UpperCamelCase( _BaseExamplesIterable ): def __init__( self : Any , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : Dict=None , ): '''simple docstring''' __a : List[str] = df __a : Tuple = partition_order or range(self.df.rdd.getNumPartitions() ) __a : List[Any] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Tuple ): '''simple docstring''' yield from self.generate_examples_fn() def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.random.Generator ): '''simple docstring''' __a : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(SCREAMING_SNAKE_CASE__ ) return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' __a : Union[str, Any] = self.split_shard_indices_by_worker(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return SparkExamplesIterable(self.df , partition_order=SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Dict ): '''simple docstring''' return len(self.partition_order ) class _UpperCamelCase( datasets.DatasetBuilder ): __SCREAMING_SNAKE_CASE : List[str] = SparkConfig def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : "pyspark.sql.DataFrame" , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : str = None , **SCREAMING_SNAKE_CASE__ : Optional[int] , ): '''simple docstring''' import pyspark __a : int = pyspark.sql.SparkSession.builder.getOrCreate() __a : Optional[int] = df __a : List[Any] = working_dir super().__init__( cache_dir=SCREAMING_SNAKE_CASE__ , config_name=str(self.df.semanticHash() ) , **SCREAMING_SNAKE_CASE__ , ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' def create_cache_and_write_probe(SCREAMING_SNAKE_CASE__ : List[str] ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. 
os.makedirs(self._cache_dir , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : List[Any] = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(SCREAMING_SNAKE_CASE__ , 'a' ) return [probe_file] if self._spark.conf.get('spark.master' , '' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: __a : List[Any] = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(SCREAMING_SNAKE_CASE__ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def __lowerCAmelCase ( self : List[str] ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : datasets.download.download_manager.DownloadManager ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' import pyspark def get_arrow_batch_size(SCREAMING_SNAKE_CASE__ : int ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) __a : List[str] = self.df.count() __a : Dict = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. __a : List[str] = ( self.df.limit(SCREAMING_SNAKE_CASE__ ) .repartition(1 ) .mapInArrow(SCREAMING_SNAKE_CASE__ , 'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) __a : Dict = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. __a : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ , int(approx_total_size / max_shard_size ) ) __a : int = self.df.repartition(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , ): '''simple docstring''' import pyspark __a : Any = ParquetWriter if file_format == 'parquet' else ArrowWriter __a : Union[str, Any] = os.path.join(self._working_dir , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) if self._working_dir else fpath __a : Optional[int] = file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. __a : List[str] = self.config.features __a : int = self._writer_batch_size __a : Union[str, Any] = self._fs.storage_options def write_arrow(SCREAMING_SNAKE_CASE__ : Optional[int] ): # Within the same SparkContext, no two task attempts will share the same attempt ID. __a : Any = pyspark.TaskContext().taskAttemptId() __a : str = next(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , ) __a : Any = 0 __a : List[str] = writer_class( features=SCREAMING_SNAKE_CASE__ , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , ) __a : Optional[Any] = pa.Table.from_batches([first_batch] ) writer.write_table(SCREAMING_SNAKE_CASE__ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: __a , __a : Optional[int] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) shard_id += 1 __a : Optional[Any] = writer_class( features=writer._features , path=working_fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , writer_batch_size=SCREAMING_SNAKE_CASE__ , storage_options=SCREAMING_SNAKE_CASE__ , embed_local_files=SCREAMING_SNAKE_CASE__ , ) __a : Union[str, Any] = pa.Table.from_batches([batch] ) writer.write_table(SCREAMING_SNAKE_CASE__ ) if writer._num_bytes > 0: __a , __a : str = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(SCREAMING_SNAKE_CASE__ ) ): __a : Any = os.path.join(os.path.dirname(SCREAMING_SNAKE_CASE__ ) , os.path.basename(SCREAMING_SNAKE_CASE__ ) ) shutil.move(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : Dict = ( self.df.mapInArrow(SCREAMING_SNAKE_CASE__ , 'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , SCREAMING_SNAKE_CASE__ : str = "arrow" , SCREAMING_SNAKE_CASE__ : Optional[Union[str, int]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ): '''simple docstring''' self._validate_cache_dir() __a : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(SCREAMING_SNAKE_CASE__ ) __a : Union[str, Any] = not is_remote_filesystem(self._fs ) __a : Optional[Any] = os.path.join if is_local else posixpath.join __a : Any = '-TTTTT-SSSSS-of-NNNNN' __a : Union[str, Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' __a : Any = path_join(self._output_dir , SCREAMING_SNAKE_CASE__ ) __a : Any = 0 __a : Dict = 0 __a : int = 0 __a : List[str] = [] __a : Optional[int] = [] for task_id, content in self._prepare_split_single(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) : Optional[int] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(SCREAMING_SNAKE_CASE__ ) __a : 
List[str] = total_num_examples __a : Optional[int] = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: __a : Any = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. __a : Dict = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , ): rename( SCREAMING_SNAKE_CASE__ , fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , f'''{global_shard_id:05d}''' ).replace('NNNNN' , f'''{total_shards:05d}''' ) , ) __a : Union[str, Any] = [] __a : List[str] = 0 for i in range(len(SCREAMING_SNAKE_CASE__ ) ): __a , __a : Union[str, Any] = task_id_and_num_shards[i] for shard_id in range(SCREAMING_SNAKE_CASE__ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ).map(lambda SCREAMING_SNAKE_CASE__ : _rename_shard(*SCREAMING_SNAKE_CASE__ ) ).collect() else: # don't use any pattern __a : List[Any] = 0 __a : Any = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' , f'''{shard_id:05d}''' ).replace('TTTTT' , f'''{task_id:05d}''' ) , fpath.replace(SCREAMING_SNAKE_CASE__ , '' ) , ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : "datasets.SplitGenerator" , ): '''simple docstring''' return SparkExamplesIterable(self.df )
47
0
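The Spark builder in the row above writes shards whose file names carry literal TTTTT (task id) and SSSSS (shard id) placeholders, then renames them into a global -SSSSS-of-NNNNN sequence. A standalone sketch of just that renaming arithmetic (function name and template are illustrative):

def final_shard_names(fpath_template: str, task_id_and_num_shards: list[tuple[int, int]]) -> list[tuple[str, str]]:
    """Map per-task shard files to globally numbered names.
    fpath_template contains the literal placeholders TTTTT, SSSSS and NNNNN."""
    total_shards = sum(num for _, num in task_id_and_num_shards)
    renames, global_shard_id = [], 0
    for task_id, num_shards in task_id_and_num_shards:
        for shard_id in range(num_shards):
            # the file on disk still carries a literal NNNNN; only TTTTT/SSSSS were filled in
            src = fpath_template.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}")
            dst = fpath_template.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}")
            renames.append((src, dst))
            global_shard_id += 1
    return renames

# e.g. final_shard_names("ds-train-TTTTT-SSSSS-of-NNNNN.arrow", [(7, 2), (9, 1)])
# -> [("ds-train-00007-00000-of-NNNNN.arrow", "ds-train-00000-of-00003.arrow"), ...]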
"""simple docstring""" import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( __lowerCamelCase , unittest.TestCase ): lowercase_ : List[str] = KandinskyVaaPriorPipeline lowercase_ : int = ['''prompt'''] lowercase_ : Tuple = ['''prompt''', '''negative_prompt'''] lowercase_ : Any = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] lowercase_ : Tuple = False @property def UpperCAmelCase__ ( self : List[Any] ) -> Any: return 32 @property def UpperCAmelCase__ ( self : Optional[int] ) -> int: return 32 @property def UpperCAmelCase__ ( self : str ) -> Dict: return self.time_input_dim @property def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]: return self.time_input_dim * 4 @property def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: return 100 @property def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: lowerCAmelCase :Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def UpperCAmelCase__ ( self : str ) -> Optional[int]: torch.manual_seed(0 ) lowerCAmelCase :Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(SCREAMING_SNAKE_CASE__ ) @property def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: torch.manual_seed(0 ) lowerCAmelCase :Union[str, Any] = { 'num_attention_heads': 2, 'attention_head_dim': 12, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } lowerCAmelCase :Optional[Any] = PriorTransformer(**SCREAMING_SNAKE_CASE__ ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 lowerCAmelCase :Tuple = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: torch.manual_seed(0 ) lowerCAmelCase :Dict = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) lowerCAmelCase :int = CLIPVisionModelWithProjection(SCREAMING_SNAKE_CASE__ ) return model @property def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]: lowerCAmelCase :Optional[Any] = CLIPImageProcessor( crop_size=224 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor def UpperCAmelCase__ ( self : Optional[int] ) -> Any: lowerCAmelCase :Union[str, Any] = self.dummy_prior 
lowerCAmelCase :int = self.dummy_image_encoder lowerCAmelCase :Optional[Any] = self.dummy_text_encoder lowerCAmelCase :Tuple = self.dummy_tokenizer lowerCAmelCase :Dict = self.dummy_image_processor lowerCAmelCase :Tuple = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1_0.0 , ) lowerCAmelCase :Any = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def UpperCAmelCase__ ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Any=0 ) -> Dict: if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ): lowerCAmelCase :List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: lowerCAmelCase :str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :Tuple = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: lowerCAmelCase :Union[str, Any] = 'cpu' lowerCAmelCase :str = self.get_dummy_components() lowerCAmelCase :Optional[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) lowerCAmelCase :List[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) ) lowerCAmelCase :List[Any] = output.image_embeds lowerCAmelCase :Optional[int] = pipe( **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0] lowerCAmelCase :List[Any] = image[0, -10:] lowerCAmelCase :Union[str, Any] = image_from_tuple[0, -10:] assert image.shape == (1, 32) lowerCAmelCase :Optional[int] = np.array( [-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def UpperCAmelCase__ ( self : Any ) -> List[str]: lowerCAmelCase :str = torch_device == 'cpu' lowerCAmelCase :Tuple = True lowerCAmelCase :Optional[Any] = False self._test_inference_batch_single_identical( test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , ) @skip_mps def UpperCAmelCase__ ( self : Optional[Any] ) -> Any: lowerCAmelCase :Optional[int] = torch_device == 'cpu' lowerCAmelCase :Any = False self._test_attention_slicing_forward_pass( test_max_difference=SCREAMING_SNAKE_CASE__ , test_mean_pixel_difference=SCREAMING_SNAKE_CASE__ , )
553
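The pipeline test above builds its random generator differently on MPS, where device-local torch.Generator objects are unsupported. That branch, extracted as a hedged helper (the name is illustrative, not from the cell):

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    """MPS does not support device-local generators, so fall back to seeding
    the default generator there, as the dummy-input helper does."""
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the (seeded) default generator
    return torch.Generator(device=device).manual_seed(seed)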
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : int ): # save results if os.path.exists(lowerCamelCase_ ): if os.path.exists(os.path.join(lowerCamelCase_ , 'config.json' ) ) and os.path.isfile( os.path.join(lowerCamelCase_ , 'config.json' ) ): os.remove(os.path.join(lowerCamelCase_ , 'config.json' ) ) if os.path.exists(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ): os.remove(os.path.join(lowerCamelCase_ , 'pytorch_model.bin' ) ) else: os.makedirs(lowerCamelCase_ ) model.save_pretrained(lowerCamelCase_ ) def UpperCAmelCase__ ( lowerCamelCase_ : int , lowerCamelCase_ : Any=False ): __a : Dict = 2 if unlogit: __a : Optional[Any] = torch.pow(lowerCamelCase_ , lowerCamelCase_ ) __a : Any = p * torch.log(lowerCamelCase_ ) __a : Union[str, Any] = 0 return -plogp.sum(dim=-1 ) def UpperCAmelCase__ ( lowerCamelCase_ : Any ): logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(lowerCamelCase_ ) ) ) ) for row in range(len(lowerCamelCase_ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : int=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : List[Any]=False ): __a , __a : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads __a : str = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) __a : int = torch.zeros(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) if head_mask is None: __a : Union[str, Any] = torch.ones(lowerCamelCase_ , lowerCamelCase_ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCamelCase_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: __a : Any = None __a : Optional[int] = 0.0 __a : Optional[Any] = 0.0 for step, inputs in enumerate(tqdm(lowerCamelCase_ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): __a : Dict = tuple(t.to(args.device ) for t in inputs ) ((__a) , ) : Dict = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) __a : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ , head_mask=lowerCamelCase_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) __a , __a , __a : int = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCamelCase_ ): __a : List[str] = entropy(attn.detach() , lowerCamelCase_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCamelCase_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= 
tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: __a : Optional[Any] = 2 __a : Union[str, Any] = torch.pow(torch.pow(lowerCamelCase_ , lowerCamelCase_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: __a : List[str] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(lowerCamelCase_ ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(lowerCamelCase_ ) logger.info('Head ranked by importance scores' ) __a : Optional[Any] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) __a : str = torch.arange( head_importance.numel() , device=args.device ) __a : Tuple = head_ranks.view_as(lowerCamelCase_ ) print_ad_tensor(lowerCamelCase_ ) return attn_entropy, head_importance, total_loss def UpperCAmelCase__ ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ): __a , __a , __a : Optional[int] = compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ ) __a : Tuple = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , lowerCamelCase_ , original_score * args.masking_threshold ) __a : Tuple = torch.ones_like(lowerCamelCase_ ) __a : int = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) __a : Tuple = original_score while current_score >= original_score * args.masking_threshold: __a : Optional[Any] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads __a : List[str] = float('Inf' ) __a : List[Any] = head_importance.view(-1 ).sort()[1] if len(lowerCamelCase_ ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads __a : Any = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) __a : int = new_head_mask.view(-1 ) __a : Tuple = 0.0 __a : int = new_head_mask.view_as(lowerCamelCase_ ) __a : Optional[int] = new_head_mask.clone().detach() print_ad_tensor(lowerCamelCase_ ) # Compute metric and head importance again __a , __a , __a : int = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , head_mask=lowerCamelCase_ ) __a : List[Any] = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , lowerCamelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_0_0 , ) logger.info('Final head mask' ) print_ad_tensor(lowerCamelCase_ ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def UpperCAmelCase__ ( lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any] ): __a : List[Any] = datetime.now() __a , __a , __a : List[str] = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ ) __a : List[str] = 1 / loss __a : List[Any] = datetime.now() - before_time __a : List[str] = sum(p.numel() for p in model.parameters() ) __a : Dict = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCamelCase_ ) ) } for k, v in heads_to_prune.items(): 
if isinstance(lowerCamelCase_ , lowerCamelCase_ ): __a : Tuple = [ v, ] assert sum(len(lowerCamelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowerCamelCase_ ) __a : Optional[Any] = sum(p.numel() for p in model.parameters() ) __a : Tuple = datetime.now() __a , __a , __a : Tuple = compute_heads_importance( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , compute_entropy=lowerCamelCase_ , compute_importance=lowerCamelCase_ , head_mask=lowerCamelCase_ , actually_pruned=lowerCamelCase_ , ) __a : Optional[Any] = 1 / loss __a : List[Any] = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , lowerCamelCase_ , lowerCamelCase_ , pruned_num_params / original_num_params * 1_0_0 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , lowerCamelCase_ , lowerCamelCase_ ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_0_0 ) save_model(lowerCamelCase_ , args.output_dir ) def UpperCAmelCase__ ( ): __a : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=lowerCamelCase_ , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=lowerCamelCase_ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=lowerCamelCase_ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=lowerCamelCase_ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=lowerCamelCase_ , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=lowerCamelCase_ , help='Metric to use for head masking.' 
) parser.add_argument( '--max_seq_length' , default=1_2_8 , type=lowerCamelCase_ , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=lowerCamelCase_ , help='Batch size.' ) parser.add_argument('--seed' , type=lowerCamelCase_ , default=4_2 ) parser.add_argument('--local_rank' , type=lowerCamelCase_ , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' ) __a : Optional[Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase_ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: __a : List[str] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) __a : Tuple = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) __a : Union[str, Any] = torch.device('cuda' , args.local_rank ) __a : Any = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) __a : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: __a : List[Any] = nn.parallel.DistributedDataParallel( lowerCamelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCamelCase_ ) elif args.n_gpu > 1: __a : Union[str, Any] = nn.DataParallel(lowerCamelCase_ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=lowerCamelCase_ ) torch.save(lowerCamelCase_ , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , lowerCamelCase_ ) # Prepare dataset __a : Tuple = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) __a : str = (torch.from_numpy(lowerCamelCase_ ),) __a : List[str] = TensorDataset(*lowerCamelCase_ ) __a : Optional[Any] = RandomSampler(lowerCamelCase_ ) __a : Union[str, Any] = DataLoader(lowerCamelCase_ , sampler=lowerCamelCase_ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Try head masking (set heads to zero until the score goes under a threshold) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: __a : Union[str, Any] = mask_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) prune_heads(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) if __name__ == "__main__": main()
47
0
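The script in the row above scores attention heads by the absolute gradient of the LM loss with respect to a differentiable head mask. A condensed sketch of that scoring loop; it assumes an HF-style causal LM whose forward accepts input_ids, labels and head_mask, and it resets the mask gradient per batch (a minor variant of the cell, which lets gradients accumulate):

import torch

def head_importance(model, dataloader, device="cpu"):
    """Accumulate |d loss / d head_mask| over a dataloader (hedged sketch)."""
    n_layers = model.config.num_hidden_layers
    n_heads = model.config.num_attention_heads
    head_mask = torch.ones(n_layers, n_heads, device=device, requires_grad=True)
    importance = torch.zeros(n_layers, n_heads, device=device)
    tokens = 0.0
    for (input_ids,) in dataloader:  # e.g. a DataLoader over a TensorDataset
        input_ids = input_ids.to(device)
        loss = model(input_ids, labels=input_ids, head_mask=head_mask)[0]
        loss.backward()                 # populates head_mask.grad
        importance += head_mask.grad.abs().detach()
        head_mask.grad = None           # reset between batches
        tokens += input_ids.numel()
    return importance / tokens          # normalize by token count, as in the cell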
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
414
import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('''0.8.3'''): raise Exception('''requires gluonnlp == 0.8.3''') if version.parse(mx.__version__) != version.parse('''1.5.0'''): raise Exception('''requires mxnet == 1.5.0''') logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = '''The Nymphenburg Palace is a beautiful palace in Munich!''' def UpperCAmelCase__ ( lowerCamelCase_ : str , lowerCamelCase_ : str ): __a : List[Any] = { 'attention_cell': 'multi_head', 'num_layers': 4, 'units': 1_0_2_4, 'hidden_size': 7_6_8, 'max_length': 5_1_2, 'num_heads': 8, 'scaled': True, 'dropout': 0.1, 'use_residual': True, 'embed_size': 1_0_2_4, 'embed_dropout': 0.1, 'word_embed': None, 'layer_norm_eps': 1e-5, 'token_type_vocab_size': 2, } __a : Optional[int] = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __a : List[str] = BERTEncoder( attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowerCamelCase_ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __a : int = 'openwebtext_ccnews_stories_books_cased' # Specify download folder to Gluonnlp's vocab __a : Optional[Any] = os.path.join(get_home_dir() , 'models' ) __a : Optional[Any] = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ ) __a : Any = nlp.model.BERTModel( lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , ) original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ ) __a : Dict = original_bort._collect_params_with_prefix() # Build our config 🤗 __a : Optional[Any] = { 'architectures': ['BertForMaskedLM'], 'attention_probs_dropout_prob': predefined_args['dropout'], 'hidden_act': 'gelu', 'hidden_dropout_prob': predefined_args['dropout'], 'hidden_size': predefined_args['embed_size'], 'initializer_range': 0.02, 'intermediate_size': predefined_args['hidden_size'], 'layer_norm_eps': predefined_args['layer_norm_eps'], 
'max_position_embeddings': predefined_args['max_length'], 'model_type': 'bort', 'num_attention_heads': predefined_args['num_heads'], 'num_hidden_layers': predefined_args['num_layers'], 'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa 'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa 'vocab_size': len(lowerCamelCase_ ), } __a : str = BertConfig.from_dict(lowerCamelCase_ ) __a : Optional[int] = BertForMaskedLM(lowerCamelCase_ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(lowerCamelCase_ : Optional[Any] ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str] ): __a : Optional[int] = hf_param.shape __a : int = to_torch(params[gluon_param] ) __a : int = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param __a : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' ) __a : str = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' ) __a : Tuple = check_and_map_params( 
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' ) __a : Union[str, Any] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __a : Union[str, Any] = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __a : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention __a : BertSelfAttention = layer.attention.self __a : Optional[int] = check_and_map_params( self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) __a : str = check_and_map_params( self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) __a : List[str] = check_and_map_params( self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) __a : str = check_and_map_params( self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) __a : Dict = check_and_map_params( self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) __a : str = check_and_map_params( self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output __a : BertSelfOutput = layer.attention.output __a : Tuple = check_and_map_params( self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' ) __a : Dict = check_and_map_params( self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' ) __a : Optional[Any] = check_and_map_params( self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) __a : Optional[Any] = check_and_map_params( self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate __a : BertIntermediate = layer.intermediate __a : List[str] = check_and_map_params( intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) __a : Optional[Any] = check_and_map_params( intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output __a : BertOutput = layer.output __a : str = check_and_map_params( bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) __a : List[Any] = check_and_map_params( bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) __a : str = check_and_map_params( bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) __a : List[str] = check_and_map_params( bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models __a : Union[str, Any] = RobertaTokenizer.from_pretrained('roberta-base' ) __a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ )['input_ids'] # Get gluon output __a : Optional[int] = mx.nd.array([input_ids] ) __a : Tuple = original_bort(inputs=lowerCamelCase_ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(lowerCamelCase_ ) __a : Optional[Any] = BertModel.from_pretrained(lowerCamelCase_ ) hf_bort_model.eval() __a : Union[str, Any] = tokenizer.encode_plus(lowerCamelCase_ , return_tensors='pt' ) __a : int = hf_bort_model(**lowerCamelCase_ )[0] __a : Dict = output_gluon[0].asnumpy() __a : str = output_hf[0].detach().numpy() __a : List[Any] 
= np.max(np.abs(hf_layer - gluon_layer ) ).item() __a : str = np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) if success: print('✔️ Both models output the same tensors' ) else: print('❌ Both models do **NOT** output the same tensors' ) print('Absolute difference is:' , lowerCamelCase_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path to the official Bort params file.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
47
0
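The parity check that closes the conversion script above (worst absolute difference plus np.allclose) is a reusable pattern. A minimal standalone sketch of it; the helper name outputs_match is an illustrative invention, not part of the script:

import numpy as np

def outputs_match(a, b, atol=1e-3):
    # Worst-case elementwise deviation between the two forward passes.
    max_abs = np.max(np.abs(a - b)).item()
    # True when every element agrees within the absolute tolerance.
    return np.allclose(a, b, atol=atol), max_abs

ok, diff = outputs_match(np.array([1.0, 2.0]), np.array([1.0, 2.0005]))
print(ok, diff)  # True ~0.0005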
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer _UpperCAmelCase : int = logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all MVP models at https://huggingface.co/models?filter=mvp _UpperCAmelCase : Optional[Any] = { '''vocab_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''', }, '''added_tokens.json''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''', }, '''merges_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''', }, } _UpperCAmelCase : Dict = { '''RUCAIBox/mvp''': 10_24, } class lowercase_ ( __lowerCamelCase ): """simple docstring""" __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ['''input_ids''', '''attention_mask'''] __lowerCAmelCase = MvpTokenizer def __init__( self : List[Any], UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Any=None, UpperCamelCase__ : Any=None, UpperCamelCase__ : int="replace", UpperCamelCase__ : List[str]="<s>", UpperCamelCase__ : Optional[Any]="</s>", UpperCamelCase__ : Union[str, Any]="</s>", UpperCamelCase__ : Dict="<s>", UpperCamelCase__ : Optional[int]="<unk>", UpperCamelCase__ : Dict="<pad>", UpperCamelCase__ : List[str]="<mask>", UpperCamelCase__ : Union[str, Any]=False, UpperCamelCase__ : str=True, **UpperCamelCase__ : str, ) -> str: super().__init__( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, tokenizer_file=SCREAMING_SNAKE_CASE__, errors=SCREAMING_SNAKE_CASE__, bos_token=SCREAMING_SNAKE_CASE__, eos_token=SCREAMING_SNAKE_CASE__, sep_token=SCREAMING_SNAKE_CASE__, cls_token=SCREAMING_SNAKE_CASE__, unk_token=SCREAMING_SNAKE_CASE__, pad_token=SCREAMING_SNAKE_CASE__, mask_token=SCREAMING_SNAKE_CASE__, add_prefix_space=SCREAMING_SNAKE_CASE__, trim_offsets=SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__, ) _A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space', SCREAMING_SNAKE_CASE__ ) != add_prefix_space: _A = getattr(SCREAMING_SNAKE_CASE__, pre_tok_state.pop('type' ) ) _A = add_prefix_space _A = pre_tok_class(**SCREAMING_SNAKE_CASE__ ) _A = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _A = 'post_processor' _A = getattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if tokenizer_component_instance: _A = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _A = tuple(state['sep'] ) if "cls" in state: _A = tuple(state['cls'] ) _A = False if state.get('add_prefix_space', SCREAMING_SNAKE_CASE__ ) != add_prefix_space: _A = add_prefix_space _A = True if state.get('trim_offsets', SCREAMING_SNAKE_CASE__ ) != trim_offsets: _A = trim_offsets _A = True if changes_to_apply: _A = getattr(SCREAMING_SNAKE_CASE__, state.pop('type' ) ) _A = 
component_class(**SCREAMING_SNAKE_CASE__ ) setattr(self.backend_tokenizer, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) @property def __UpperCAmelCase ( self : Dict ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : Tuple ) -> List[Any]: _A = AddedToken(SCREAMING_SNAKE_CASE__, lstrip=SCREAMING_SNAKE_CASE__, rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) else value _A = value def __UpperCAmelCase ( self : Optional[int], *UpperCamelCase__ : Optional[int], **UpperCamelCase__ : List[str] ) -> List[Any]: _A = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ) def __UpperCAmelCase ( self : int, *UpperCamelCase__ : Dict, **UpperCamelCase__ : Tuple ) -> Tuple: _A = kwargs.get('is_split_into_words', SCREAMING_SNAKE_CASE__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.' ) return super()._encode_plus(*SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ) def __UpperCAmelCase ( self : str, UpperCamelCase__ : str, UpperCamelCase__ : Optional[str] = None ) -> Union[str, Any]: _A = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__, name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ ) def __UpperCAmelCase ( self : int, UpperCamelCase__ : Any, UpperCamelCase__ : Tuple=None ) -> Union[str, Any]: _A = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __UpperCAmelCase ( self : Dict, UpperCamelCase__ : List[int], UpperCamelCase__ : Optional[List[int]] = None ) -> Union[str, Any]: _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
107
def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ): __a : Any = '' for i in table: res += inp[i - 1] return res def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] ): return data[1:] + data[0] def UpperCAmelCase__ ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] ): __a : Optional[int] = '' for i in range(len(lowerCamelCase_ ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ): __a : List[str] = int('0b' + data[0] + data[-1] , 2 ) __a : List[str] = int('0b' + data[1:3] , 2 ) return bin(s[row][col] )[2:] def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ): __a : List[Any] = message[:4] __a : str = message[4:] __a : Any = apply_table(lowerCamelCase_ , lowerCamelCase_ ) __a : int = xor(lowerCamelCase_ , lowerCamelCase_ ) __a : Dict = apply_sbox(lowerCamelCase_ , temp[:4] ) # noqa: E741 __a : Tuple = apply_sbox(lowerCamelCase_ , temp[4:] ) __a : List[Any] = '0' * (2 - len(lowerCamelCase_ )) + l # noqa: E741 __a : List[str] = '0' * (2 - len(lowerCamelCase_ )) + r __a : List[Any] = apply_table(l + r , lowerCamelCase_ ) __a : Dict = xor(lowerCamelCase_ , lowerCamelCase_ ) return temp + right if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = input('''Enter 10 bit key: ''') SCREAMING_SNAKE_CASE__ = input('''Enter 8 bit message: ''') SCREAMING_SNAKE_CASE__ = [6, 3, 7, 4, 8, 5, 10, 9] SCREAMING_SNAKE_CASE__ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] SCREAMING_SNAKE_CASE__ = [2, 4, 3, 1] SCREAMING_SNAKE_CASE__ = [2, 6, 3, 1, 4, 8, 5, 7] SCREAMING_SNAKE_CASE__ = [4, 1, 3, 5, 7, 2, 8, 6] SCREAMING_SNAKE_CASE__ = [4, 1, 2, 3, 2, 3, 4, 1] SCREAMING_SNAKE_CASE__ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] SCREAMING_SNAKE_CASE__ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation SCREAMING_SNAKE_CASE__ = apply_table(key, paa_table) SCREAMING_SNAKE_CASE__ = temp[:5] SCREAMING_SNAKE_CASE__ = temp[5:] SCREAMING_SNAKE_CASE__ = left_shift(left) SCREAMING_SNAKE_CASE__ = left_shift(right) SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table) SCREAMING_SNAKE_CASE__ = left_shift(left) SCREAMING_SNAKE_CASE__ = left_shift(right) SCREAMING_SNAKE_CASE__ = left_shift(left) SCREAMING_SNAKE_CASE__ = left_shift(right) SCREAMING_SNAKE_CASE__ = apply_table(left + right, pa_table) # encryption SCREAMING_SNAKE_CASE__ = apply_table(message, IP) SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4] SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv) print('''Cipher text is:''', CT) # decryption SCREAMING_SNAKE_CASE__ = apply_table(CT, IP) SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = temp[4:] + temp[:4] SCREAMING_SNAKE_CASE__ = function(expansion, sa, sa, keya, temp) SCREAMING_SNAKE_CASE__ = apply_table(temp, IP_inv) print('''Plain text after decrypting is:''', PT)
47
0
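The s-box lookup in the simplified-DES sample above is easy to misread: the outer bits of the 4-bit input select the row, and the middle two bits select the column. A minimal standalone sketch of that indexing; the sbox table below copies the first s-box from the sample, and apply_sbox mirrors the helper's logic:

sbox = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]

def apply_sbox(table, data):
    # First and last bits pick the row; the middle two bits pick the column.
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(table[row][col])[2:]

print(apply_sbox(sbox, "1011"))  # row=0b11=3, col=0b01=1 -> sbox[3][1] -> '1'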
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCamelCase ( __lowerCamelCase): '''simple docstring''' def __init__( self , a_ , a_=1_3 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=True , a_=False , a_=False , a_=False , a_=2 , a_=9_9 , a_=0 , a_=3_2 , a_=5 , a_=4 , a_=0.1 , a_=0.1 , a_=5_1_2 , a_=1_2 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_="last" , a_=None , a_=None , ) -> Union[str, Any]: lowercase : Dict = parent lowercase : Dict = batch_size lowercase : Union[str, Any] = seq_length lowercase : Union[str, Any] = is_training lowercase : List[Any] = use_input_lengths lowercase : Any = use_token_type_ids lowercase : Any = use_labels lowercase : Optional[Any] = gelu_activation lowercase : List[Any] = sinusoidal_embeddings lowercase : int = causal lowercase : Tuple = asm lowercase : List[str] = n_langs lowercase : Tuple = vocab_size lowercase : Dict = n_special lowercase : int = hidden_size lowercase : Dict = num_hidden_layers lowercase : Dict = num_attention_heads lowercase : str = hidden_dropout_prob lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : List[str] = max_position_embeddings lowercase : Tuple = type_vocab_size lowercase : str = type_sequence_label_size lowercase : Dict = initializer_range lowercase : int = num_labels lowercase : Optional[int] = num_choices lowercase : List[Any] = summary_type lowercase : Any = use_proj lowercase : Any = scope def a__ ( self ) -> Any: lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) lowercase : List[Any] = None if self.use_input_lengths: lowercase : List[Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase : str = None if self.use_token_type_ids: lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase : int = None lowercase : str = None lowercase : Dict = None if self.use_labels: lowercase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : List[Any] = ids_tensor([self.batch_size] , 2 ).float() lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowercase : Dict = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def a__ ( self ) -> int: return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , 
gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ) -> Dict: lowercase : Tuple = FlaubertModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ , lengths=SCREAMING_SNAKE_CASE__ , langs=SCREAMING_SNAKE_CASE__ ) lowercase : int = model(SCREAMING_SNAKE_CASE__ , langs=SCREAMING_SNAKE_CASE__ ) lowercase : int = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ) -> int: lowercase : Any = FlaubertWithLMHeadModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Optional[int] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ) -> str: lowercase : Union[str, Any] = FlaubertForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : int = model(SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = model(SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ) -> Dict: lowercase : Tuple = FlaubertForQuestionAnswering(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = model( SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , cls_index=SCREAMING_SNAKE_CASE__ , is_impossible=SCREAMING_SNAKE_CASE__ , p_mask=SCREAMING_SNAKE_CASE__ , ) lowercase : List[Any] = model( SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , cls_index=SCREAMING_SNAKE_CASE__ , is_impossible=SCREAMING_SNAKE_CASE__ , ) (lowercase ) : Optional[Any] = result_with_labels.to_tuple() lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ ) (lowercase ) : List[Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ) -> Dict: lowercase : Optional[Any] = 
FlaubertForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : List[str] = model(SCREAMING_SNAKE_CASE__ ) lowercase : int = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ) -> Dict: lowercase : List[Any] = self.num_labels lowercase : str = FlaubertForTokenClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Any = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ) -> Any: lowercase : Any = self.num_choices lowercase : List[str] = FlaubertForMultipleChoice(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : List[Any] = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a__ ( self ) -> Optional[int]: lowercase : Dict = self.prepare_config_and_inputs() ( lowercase ) : str = config_and_inputs lowercase : List[str] = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class _UpperCamelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase): '''simple docstring''' _snake_case = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) _snake_case = ( { '''feature-extraction''': FlaubertModel, '''fill-mask''': FlaubertWithLMHeadModel, '''question-answering''': FlaubertForQuestionAnsweringSimple, '''text-classification''': FlaubertForSequenceClassification, '''token-classification''': FlaubertForTokenClassification, '''zero-shot''': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def a__ ( self , a_ , a_ , a_ , a_ , a_ ) -> Dict: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def a__ ( self , a_ , a_ , a_=False ) -> List[str]: lowercase : List[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def a__ ( self ) -> Optional[Any]: lowercase : Union[str, Any] = FlaubertModelTester(self ) lowercase : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , emb_dim=3_7 ) def a__ ( self ) -> int: self.config_tester.run_common_tests() def a__ ( self ) -> int: lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*SCREAMING_SNAKE_CASE__ ) def a__ ( self ) -> Union[str, Any]: lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*SCREAMING_SNAKE_CASE__ ) def a__ ( self ) -> Any: lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*SCREAMING_SNAKE_CASE__ ) def a__ ( self ) -> int: lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*SCREAMING_SNAKE_CASE__ ) def a__ ( self ) -> Optional[int]: lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*SCREAMING_SNAKE_CASE__ ) def a__ ( self ) -> Dict: lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*SCREAMING_SNAKE_CASE__ ) def a__ ( self ) -> Optional[int]: lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*SCREAMING_SNAKE_CASE__ ) @slow def a__ ( self ) -> str: for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Dict = FlaubertModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @slow @require_torch_gpu def a__ ( self ) -> int: lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return lowercase : str = True lowercase : Tuple = model_class(config=SCREAMING_SNAKE_CASE__ ) lowercase : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = torch.jit.trace( SCREAMING_SNAKE_CASE__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , "traced_model.pt" ) ) lowercase : Tuple = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE__ , "traced_model.pt" ) , map_location=SCREAMING_SNAKE_CASE__ ) loaded(inputs_dict["input_ids"].to(SCREAMING_SNAKE_CASE__ ) , inputs_dict["attention_mask"].to(SCREAMING_SNAKE_CASE__ ) ) @require_torch class _UpperCamelCase ( unittest.TestCase): '''simple docstring''' @slow def a__ ( self ) -> List[Any]: lowercase : List[str] = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) lowercase : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) with torch.no_grad(): lowercase : str = model(SCREAMING_SNAKE_CASE__ )[0] lowercase : Optional[Any] = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ ) lowercase : Any = torch.tensor( [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
372
import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class _UpperCamelCase( unittest.TestCase ): def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) ) for a, b in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delta=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Any ): '''simple docstring''' __a : List[Any] = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(SCREAMING_SNAKE_CASE__ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' __a : int = None ops.enable_eager_execution_internal() __a : Optional[Any] = tf.config.list_physical_devices('CPU' ) if len(SCREAMING_SNAKE_CASE__ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) __a : int = tf.config.list_logical_devices(device_type='CPU' ) __a : str = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): __a : List[str] = GradientAccumulator() __a : Tuple = tf.Variable([4.0, 3.0] ) __a , __a : int = create_optimizer(5e-5 , 1_0 , 5 ) __a : List[Any] = tf.Variable([0.0, 0.0] , trainable=SCREAMING_SNAKE_CASE__ ) def accumulate_on_replica(SCREAMING_SNAKE_CASE__ : Optional[Any] ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ): with strategy.scope(): __a : Optional[Any] = strategy.experimental_local_results(SCREAMING_SNAKE_CASE__ ) local_variables[0].assign(SCREAMING_SNAKE_CASE__ ) local_variables[1].assign(SCREAMING_SNAKE_CASE__ ) strategy.run(SCREAMING_SNAKE_CASE__ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(SCREAMING_SNAKE_CASE__ ) def _check_local_values(SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ): __a : Union[str, Any] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , SCREAMING_SNAKE_CASE__ , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() 
self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
47
0
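The GradientAccumulator test above boils down to one idea: per-step gradients are summed and the optimizer is applied only once per effective batch. A minimal sketch of that accumulation with plain TensorFlow tensors, using the same step gradients as the single-replica test:

import tensorflow as tf

step_grads = [tf.constant([1.0, 2.0]), tf.constant([-2.0, 1.0]), tf.constant([-1.0, 2.0])]
accumulated = tf.zeros([2])
for g in step_grads:
    accumulated += g  # sum instead of applying the optimizer every step
print(accumulated.numpy())  # [-2.  5.], the values asserted in the test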
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __snake_case = { 'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['VisionEncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['TFVisionEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['FlaxVisionEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys __snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
200
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''', '''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''', } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : Union[str, Any] = '''roberta''' def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=5_0_2_6_5 , SCREAMING_SNAKE_CASE__ : Optional[int]=7_6_8 , SCREAMING_SNAKE_CASE__ : str=1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2 , SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0_7_2 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE__ : str=0.1 , SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : List[str]=1e-12 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[str]=2 , SCREAMING_SNAKE_CASE__ : Tuple="absolute" , SCREAMING_SNAKE_CASE__ : Any=True , SCREAMING_SNAKE_CASE__ : List[str]=None , **SCREAMING_SNAKE_CASE__ : Any , ): '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = vocab_size __a : Tuple = hidden_size __a : List[str] = num_hidden_layers __a : List[Any] = num_attention_heads __a : str = hidden_act __a : Optional[Any] = intermediate_size __a : Dict = hidden_dropout_prob __a : List[str] = attention_probs_dropout_prob __a : Optional[Any] = max_position_embeddings __a : Dict = type_vocab_size __a : str = initializer_range __a : List[str] = layer_norm_eps __a : Optional[int] = position_embedding_type __a : Union[str, Any] = use_cache __a : str = classifier_dropout class _UpperCamelCase( __lowerCamelCase ): @property def __lowerCAmelCase ( self : Any ): '''simple docstring''' if self.task == "multiple-choice": __a : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __a : Dict = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
47
0
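The OnnxConfig subclass above encodes one detail worth spelling out: multiple-choice inputs carry an extra 'choice' dimension between batch and sequence, so the dynamic-axes mapping changes with the task. A minimal sketch of that branching; dynamic_axes is an illustrative helper name, not part of the class:

from collections import OrderedDict

def dynamic_axes(task):
    if task == "multiple-choice":
        axis = {0: "batch", 1: "choice", 2: "sequence"}  # (batch, choice, seq)
    else:
        axis = {0: "batch", 1: "sequence"}  # (batch, seq)
    return OrderedDict([("input_ids", axis), ("attention_mask", axis)])

print(dynamic_axes("multiple-choice"))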
from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''', # See all GLPN models at https://huggingface.co/models?filter=glpn } class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" __UpperCAmelCase : int = '''glpn''' def __init__( self , a_=3 , a_=4 , a_=[2, 2, 2, 2] , a_=[8, 4, 2, 1] , a_=[32, 64, 160, 256] , a_=[7, 3, 3, 3] , a_=[4, 2, 2, 2] , a_=[1, 2, 5, 8] , a_=[4, 4, 4, 4] , a_="gelu" , a_=0.0 , a_=0.0 , a_=0.02 , a_=0.1 , a_=1E-6 , a_=64 , a_=10 , a_=-1 , **a_ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowerCamelCase_ : List[str] = num_channels lowerCamelCase_ : Dict = num_encoder_blocks lowerCamelCase_ : List[str] = depths lowerCamelCase_ : Optional[int] = sr_ratios lowerCamelCase_ : int = hidden_sizes lowerCamelCase_ : str = patch_sizes lowerCamelCase_ : Union[str, Any] = strides lowerCamelCase_ : str = mlp_ratios lowerCamelCase_ : Optional[int] = num_attention_heads lowerCamelCase_ : List[Any] = hidden_act lowerCamelCase_ : Any = hidden_dropout_prob lowerCamelCase_ : List[Any] = attention_probs_dropout_prob lowerCamelCase_ : str = initializer_range lowerCamelCase_ : int = drop_path_rate lowerCamelCase_ : int = layer_norm_eps lowerCamelCase_ : Optional[Any] = decoder_hidden_size lowerCamelCase_ : Dict = max_depth lowerCamelCase_ : Union[str, Any] = head_in_index
250
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = '''▁''' SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''sentencepiece.bpe.model'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } SCREAMING_SNAKE_CASE__ = { '''facebook/xglm-564M''': 2048, } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" , SCREAMING_SNAKE_CASE__ : str="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ): '''simple docstring''' __a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer __a : Any = 7 __a : Union[str, Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )] __a : Union[str, Any] = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , ) __a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) ) __a : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __a : Any = 1 # Mimic fairseq token-to-id alignment for the first 4 token __a : str = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} __a : List[str] = len(self.sp_model ) __a : Optional[int] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ ) __a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : List[str] ): '''simple docstring''' __a : Tuple = self.__dict__.copy() __a : List[str] = None __a : Optional[int] = self.sp_model.serialized_model_proto() return state def __setstate__( self : List[str] , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' __a : int = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __a : Dict = {} __a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a __a : Optional[Any] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ): '''simple docstring''' __a : Optional[int] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' __a : str = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __a : List[str] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' __a : 
Optional[int] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip() return out_string def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __a : Any = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi: __a : List[Any] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
47
0
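The XGLM tokenizer above keeps two vocabularies aligned: fairseq reserves the first slots for special tokens, so every ordinary SentencePiece id is shifted by a fixed offset. A minimal sketch of that mapping, taking its values from the alignment table in the sample's comment; token_to_id is an illustrative helper name:

fairseq_offset = 1
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

def token_to_id(token, spm_id):
    # Special tokens use fixed fairseq ids; everything else shifts by the offset.
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    return spm_id + fairseq_offset

print(token_to_id(",", 3))  # 4, matching the ',' column of the alignment comment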
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class UpperCAmelCase_ ( yaml.SafeLoader ): def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]: __lowercase : Dict = [self.constructed_objects[key_node] for key_node, _ in node.value] __lowercase : Dict = [tuple(SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else key for key in keys] __lowercase : Optional[int] = Counter(SCREAMING_SNAKE_CASE__ ) __lowercase : Dict = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=False ) -> Union[str, Any]: __lowercase : List[Any] = super().construct_mapping(SCREAMING_SNAKE_CASE__ , deep=SCREAMING_SNAKE_CASE__ ) self._check_no_duplicates_on_constructed_node(SCREAMING_SNAKE_CASE__ ) return mapping def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : List[str] = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: __lowercase : int = full_content[1:].index('''---''' ) + 1 __lowercase : Any = '\n'.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(lowerCamelCase_ ) class UpperCAmelCase_ ( __lowerCamelCase ): # class attributes UpperCamelCase ={'''train_eval_index'''} # train-eval-index in the YAML metadata @classmethod def _lowerCamelCase ( cls , UpperCamelCase_ ) -> Dict: with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as readme_file: __lowercase : Optional[Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(SCREAMING_SNAKE_CASE__ ) else: return cls() def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: if path.exists(): with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as readme_file: __lowercase : Optional[Any] = readme_file.read() else: __lowercase : str = None __lowercase : Optional[int] = self._to_readme(SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(SCREAMING_SNAKE_CASE__ ) def _lowerCamelCase ( self , UpperCamelCase_ = None ) -> Tuple: if readme_content is not None: __lowercase : str = _split_yaml_from_readme(SCREAMING_SNAKE_CASE__ ) __lowercase : List[str] = '---\n' + self.to_yaml_string() + '---\n' + content else: __lowercase : Tuple = '---\n' + self.to_yaml_string() + '---\n' return full_content @classmethod def _lowerCamelCase ( cls , UpperCamelCase_ ) -> Dict: __lowercase : Any = yaml.load(SCREAMING_SNAKE_CASE__ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields __lowercase : str = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**SCREAMING_SNAKE_CASE__ ) def _lowerCamelCase ( self ) -> Tuple: return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=SCREAMING_SNAKE_CASE__ , allow_unicode=SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' , ).decode('''utf-8''' ) a_ = { 'image-classification': [], 'translation': [], 'image-segmentation': [], 'fill-mask': [], 'automatic-speech-recognition': [], 'token-classification': [], 'sentence-similarity': [], 'audio-classification': [], 'question-answering': [], 
'summarization': [], 'zero-shot-classification': [], 'table-to-text': [], 'feature-extraction': [], 'other': [], 'multiple-choice': [], 'text-classification': [], 'text-to-image': [], 'text2text-generation': [], 'zero-shot-image-classification': [], 'tabular-classification': [], 'tabular-regression': [], 'image-to-image': [], 'tabular-to-text': [], 'unconditional-image-generation': [], 'text-retrieval': [], 'text-to-speech': [], 'object-detection': [], 'audio-to-audio': [], 'text-generation': [], 'conversational': [], 'table-question-answering': [], 'visual-question-answering': [], 'image-to-text': [], 'reinforcement-learning': [], 'voice-activity-detection': [], 'time-series-forecasting': [], 'document-question-answering': [], } if __name__ == "__main__": from argparse import ArgumentParser a_ = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.') ap.add_argument('readme_filepath') a_ = ap.parse_args() a_ = Path(args.readme_filepath) a_ = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
76
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = [ ('''bert.bert''', '''visual_bert'''), ('''bert.cls''', '''cls'''), ('''bert.classifier''', '''cls'''), ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''), ('''position_embeddings_visual''', '''visual_position_embeddings'''), ('''projection''', '''visual_projection'''), ] SCREAMING_SNAKE_CASE__ = [ '''nlvr2_coco_pre_trained.th''', '''nlvr2_fine_tuned.th''', '''nlvr2_pre_trained.th''', '''vcr_coco_pre_train.th''', '''vcr_fine_tune.th''', '''vcr_pre_train.th''', '''vqa_coco_pre_trained.th''', '''vqa_fine_tuned.th''', '''vqa_pre_trained.th''', ] def UpperCAmelCase__ ( lowerCamelCase_ : Optional[int] ): __a : str = torch.load(lowerCamelCase_ , map_location='cpu' ) return sd def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Dict=rename_keys_prefix ): __a : Optional[Any] = OrderedDict() __a : Any = torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue __a : List[Any] = key for name_pair in rename_keys_prefix: __a : List[str] = new_key.replace(name_pair[0] , name_pair[1] ) __a : Any = d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately __a : int = new_d['cls.predictions.bias'] return new_d @torch.no_grad() def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : Any ): assert ( checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.''' # Get Config if "pre" in checkpoint_path: __a : Dict = 'pretraining' if "vcr" in checkpoint_path: __a : int = {'visual_embedding_dim': 5_1_2} elif "vqa_advanced" in checkpoint_path: __a : int = {'visual_embedding_dim': 2_0_4_8} elif "vqa" in checkpoint_path: __a : Tuple = {'visual_embedding_dim': 2_0_4_8} elif "nlvr" in checkpoint_path: __a : List[Any] = {'visual_embedding_dim': 1_0_2_4} else: raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' ) else: if "vcr" in checkpoint_path: __a : int = {'visual_embedding_dim': 5_1_2} __a : Any = 'multichoice' elif "vqa_advanced" in checkpoint_path: __a : Any = {'visual_embedding_dim': 2_0_4_8} __a : List[str] = 'vqa_advanced' elif "vqa" in checkpoint_path: __a : List[Any] = {'visual_embedding_dim': 2_0_4_8, 'num_labels': 3_1_2_9} __a : List[Any] = 'vqa' elif "nlvr" in checkpoint_path: __a : Optional[int] = { 'visual_embedding_dim': 1_0_2_4, 'num_labels': 2, } __a : Optional[Any] = 'nlvr' __a : str = VisualBertConfig(**lowerCamelCase_ ) # Load State Dict __a : str = load_state_dict(lowerCamelCase_ ) __a : str = get_new_dict(lowerCamelCase_ , lowerCamelCase_ ) if model_type == "pretraining": __a : Optional[Any] = VisualBertForPreTraining(lowerCamelCase_ ) elif model_type == "vqa": __a : Any = VisualBertForQuestionAnswering(lowerCamelCase_ ) elif model_type == "nlvr": __a : int = VisualBertForVisualReasoning(lowerCamelCase_ ) elif model_type == "multichoice": __a : Optional[int] = VisualBertForMultipleChoice(lowerCamelCase_ ) 
model.load_state_dict(lowerCamelCase_ ) # Save Checkpoints Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ ) model.save_pretrained(lowerCamelCase_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() # Required parameters parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''') parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''') SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
47
0
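The VisualBERT conversion above renames checkpoint keys by running every key through an ordered list of substring substitutions; order matters, since later pairs see the output of earlier ones. A minimal sketch of that pass, reusing two pairs from the script's rename list:

rename_pairs = [("bert.bert", "visual_bert"), ("bert.cls", "cls")]

def rename_key(key):
    for old, new in rename_pairs:
        key = key.replace(old, new)  # each pair rewrites the running key
    return key

print(rename_key("bert.bert.encoder.layer.0.attention"))  # visual_bert.encoder.layer.0.attention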
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __lowerCAmelCase : Tuple = { '''configuration_conditional_detr''': [ '''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConditionalDetrConfig''', '''ConditionalDetrOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int = ['''ConditionalDetrFeatureExtractor'''] __lowerCAmelCase : str = ['''ConditionalDetrImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Tuple = [ '''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConditionalDetrForObjectDetection''', '''ConditionalDetrForSegmentation''', '''ConditionalDetrModel''', '''ConditionalDetrPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig, ConditionalDetrOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor from .image_processing_conditional_detr import ConditionalDetrImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_conditional_detr import ( CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel, ConditionalDetrPreTrainedModel, ) else: import sys __lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
58
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
47
0
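The one-liner above is a near-quine: the %r slot re-inserts the string's own repr and %% collapses to a literal %, but because the source uses triple quotes while repr emits single quotes, the printed program differs from its source by the quoting; running the output once more reproduces it exactly. A minimal true quine built on the same trick:

# %r re-emits the string's own repr; %% becomes a single literal %.
s = 's = %r; print(s %% s)'; print(s % s)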
'''simple docstring''' from collections.abc import Sequence from queue import Queue class _snake_case : '''simple docstring''' def __init__( self: Tuple , __UpperCamelCase: List[str] , __UpperCamelCase: List[Any] , __UpperCamelCase: Tuple , __UpperCamelCase: int=None , __UpperCamelCase: Tuple=None ) -> List[Any]: __magic_name__ : Tuple = start __magic_name__ : Dict = end __magic_name__ : List[str] = val __magic_name__ : List[Any] = (start + end) // 2 __magic_name__ : Optional[Any] = left __magic_name__ : List[str] = right def __repr__( self: Dict ) -> int: return f"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})""" class _snake_case : '''simple docstring''' def __init__( self: List[str] , __UpperCamelCase: Sequence , __UpperCamelCase: Optional[Any] ) -> Union[str, Any]: __magic_name__ : Tuple = collection __magic_name__ : Dict = function if self.collection: __magic_name__ : int = self._build_tree(0 , len(SCREAMING_SNAKE_CASE__ ) - 1 ) def lowerCAmelCase__ ( self: Optional[int] , __UpperCamelCase: Optional[int] , __UpperCamelCase: int ) -> Any: self._update_tree(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowerCAmelCase__ ( self: str , __UpperCamelCase: Any , __UpperCamelCase: List[str] ) -> List[Any]: return self._query_range(self.root , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowerCAmelCase__ ( self: Optional[int] , __UpperCamelCase: int , __UpperCamelCase: Dict ) -> Optional[int]: if start == end: return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.collection[start] ) __magic_name__ : Tuple = (start + end) // 2 __magic_name__ : Optional[int] = self._build_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __magic_name__ : Tuple = self._build_tree(mid + 1 , SCREAMING_SNAKE_CASE__ ) return SegmentTreeNode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , self.fn(left.val , right.val ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowerCAmelCase__ ( self: List[Any] , __UpperCamelCase: Optional[Any] , __UpperCamelCase: Optional[int] , __UpperCamelCase: List[Any] ) -> List[str]: if node.start == i and node.end == i: __magic_name__ : Optional[Any] = val return if i <= node.mid: self._update_tree(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: self._update_tree(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __magic_name__ : int = self.fn(node.left.val , node.right.val ) def lowerCAmelCase__ ( self: Any , __UpperCamelCase: Dict , __UpperCamelCase: Tuple , __UpperCamelCase: Optional[Any] ) -> int: if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , SCREAMING_SNAKE_CASE__ , node.mid ) , self._query_range(node.right , node.mid + 1 , SCREAMING_SNAKE_CASE__ ) , ) else: # range in right child tree return self._query_range(node.right , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]: if self.root is not None: __magic_name__ : Tuple = Queue() queue.put(self.root ) while not queue.empty(): __magic_name__ : Tuple = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print("*" * 50) _SCREAMING_SNAKE_CASE : List[Any] = 
SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
436
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class _UpperCamelCase( __lowerCamelCase ): def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' __a : List[Any] = tempfile.mkdtemp() __a : int = 8 # DPR tok __a : Dict = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __a : int = os.path.join(self.tmpdirname , 'dpr_tokenizer' ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , DPR_VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) # BART tok __a : str = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __a : Optional[int] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __a : List[str] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __a : List[str] = {'unk_token': '<unk>'} __a : Dict = os.path.join(self.tmpdirname , 'bart_tokenizer' ) os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) __a : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['vocab_file'] ) __a : Dict = os.path.join(SCREAMING_SNAKE_CASE__ , BART_VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(SCREAMING_SNAKE_CASE__ ) ) def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer' ) ) def __lowerCAmelCase ( self : str ): '''simple docstring''' return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer' ) ) def __lowerCAmelCase ( self : Union[str, Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) @require_tokenizers def __lowerCAmelCase ( self : Dict ): '''simple docstring''' __a : Tuple = os.path.join(self.tmpdirname , 'rag_tokenizer' ) __a : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) __a : Optional[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(SCREAMING_SNAKE_CASE__ ) rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) __a : List[Any] = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , 
config=SCREAMING_SNAKE_CASE__ ) self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE__ ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE__ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' __a : Optional[Any] = RagTokenizer.from_pretrained('facebook/rag-token-nq' ) __a : List[Any] = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __a : Tuple = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @slow def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' __a : Any = RagTokenizer.from_pretrained('facebook/rag-sequence-nq' ) __a : Union[str, Any] = [ 'who got the first nobel prize in physics', 'when is the next deadpool movie being released', 'which mode is used for short wave broadcast service', 'who is the owner of reading football club', 'when is the next scandal episode coming out', 'when is the last time the philadelphia won the superbowl', 'what is the most current adobe flash player version', 'how many episodes are there in dragon ball z', 'what is the first step in the evolution of the eye', 'where is gall bladder situated in human body', 'what is the main mineral in lithium batteries', 'who is the president of usa right now', 'where do the greasers live in the outsiders', 'panda is a national animal of which country', 'what is the name of manchester united stadium', ] __a : str = tokenizer(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
47
0
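The round trip exercised in the RAG tokenizer test above can be reproduced outside the test harness. A minimal sketch, assuming network access; the public DPR/BART checkpoint names stand in for the toy vocabularies built in setUp and are illustrative, not taken from the sample:

from transformers import BartTokenizer, DPRQuestionEncoderTokenizer, RagTokenizer
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.rag.configuration_rag import RagConfig

save_dir = "./rag_tokenizer"  # hypothetical scratch directory
rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
rag_tokenizer = RagTokenizer(
    question_encoder=DPRQuestionEncoderTokenizer.from_pretrained(
        "facebook/dpr-question_encoder-single-nq-base"  # illustrative checkpoint
    ),
    generator=BartTokenizer.from_pretrained("facebook/bart-large"),  # illustrative checkpoint
)

# Save config and tokenizer side by side, then reload the composite tokenizer.
rag_config.save_pretrained(save_dir)
rag_tokenizer.save_pretrained(save_dir)
reloaded = RagTokenizer.from_pretrained(save_dir, config=rag_config)

assert reloaded.question_encoder.get_vocab() == rag_tokenizer.question_encoder.get_vocab()
assert reloaded.generator.get_vocab() == rag_tokenizer.generator.get_vocab()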
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax

if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        # from_pretrained returns the pipeline together with its parameter pytree
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        # merge the scheduler state into the pipeline parameters
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
210
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {'''vocab_file''': '''spiece.model'''} SCREAMING_SNAKE_CASE__ = { '''vocab_file''': { '''bert_for_seq_generation''': ( '''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model''' ), } } SCREAMING_SNAKE_CASE__ = {'''bert_for_seq_generation''': 512} class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : List[int] = [] __SCREAMING_SNAKE_CASE : int = ['''input_ids''', '''attention_mask'''] def __init__( self : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Tuple="</s>" , SCREAMING_SNAKE_CASE__ : Any="<unk>" , SCREAMING_SNAKE_CASE__ : int="<pad>" , SCREAMING_SNAKE_CASE__ : List[str]="<::::>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Tuple , ): '''simple docstring''' __a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , ) __a : int = vocab_file __a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' return self.sp_model.get_piece_size() def __lowerCAmelCase ( self : int ): '''simple docstring''' __a : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[Any] ): '''simple docstring''' __a : Union[str, Any] = self.__dict__.copy() __a : Any = None return state def __setstate__( self : int , SCREAMING_SNAKE_CASE__ : List[str] ): '''simple docstring''' __a : str = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __a : str = {} __a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' __a : int = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ ) return token def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' __a : Optional[Any] = [] __a : Optional[int] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token __a : Dict = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE__ ) out_string += 
self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) return out_string.strip() def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __a : Tuple = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi: __a : List[str] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
47
0
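The Flax pipeline test above relies on the standard pmap data layout: the parameter pytree is replicated onto every device, the prompt batch is sharded so each device receives one slice, and the PRNG key is split per device. A stripped-down sketch of that layout, assuming a multi-device JAX runtime and network access to the checkpoint:

import jax
import jax.numpy as jnp
from diffusers import FlaxStableDiffusionPipeline
from flax.jax_utils import replicate
from flax.training.common_utils import shard

sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16
)

prompts = jax.device_count() * ["A painting of a squirrel eating a burger"]
prompt_ids = sd_pipe.prepare_inputs(prompts)

params = replicate(params)      # copy the parameter pytree onto every device
prompt_ids = shard(prompt_ids)  # split the batch: one slice per device
rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())  # one PRNG key per device

images = sd_pipe(prompt_ids, params, rng, num_inference_steps=25, jit=True)[0]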
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str):
    # Initialise the PyTorch model from the JSON config
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
70
from ..utils import DummyObject, requires_backends class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : List[Any] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Any = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : str , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Dict , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Dict = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Any , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : int = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , 
*SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : int ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) class _UpperCamelCase( metaclass=__lowerCamelCase ): __SCREAMING_SNAKE_CASE : Optional[int] = ['''torch''', '''transformers''', '''onnx'''] def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' requires_backends(self , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] ) @classmethod def __lowerCAmelCase ( cls : int , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ): '''simple docstring''' requires_backends(cls , ['torch', 'transformers', 'onnx'] )
47
0
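The conversion script above is normally driven through argparse, but the function can also be called directly. A sketch, where the module name and all three paths are placeholders rather than files shipped with the library:

from convert_bert_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="./bert_model.ckpt",   # placeholder: your TF checkpoint
    bert_config_file="./bert_config.json",    # placeholder: matching config
    pytorch_dump_path="./pytorch_model.bin",  # placeholder: output file
)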
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def UpperCAmelCase ( a__ ): '''simple docstring''' if num <= 0: raise ValueError('math domain error' ) return quad(lowerCamelCase_ , 0 , lowerCamelCase_ , args=(lowerCamelCase_) )[0] def UpperCAmelCase ( a__ , a__ ): '''simple docstring''' return math.pow(lowerCamelCase_ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
553
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    # Gauss's algorithm for the date of Easter in the Gregorian calendar
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
47
0
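As a sanity check for the quadrature-based gamma above, the integral should agree with math.gamma for positive reals to well within the integration tolerance. A small self-contained check, reusing the integrand as fixed up above:

import math

from numpy import inf
from scipy.integrate import quad


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


for z in (1.0, 2.5, 5.0):
    approx = quad(integrand, 0, inf, args=(z,))[0]
    print(z, approx, math.gamma(z))
    assert abs(approx - math.gamma(z)) < 1e-6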
'''simple docstring''' import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowerCAmelCase_ : Dict = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase_ : Optional[int] = direct_transformers_import(PATH_TO_TRANSFORMERS) lowerCAmelCase_ : str = transformers.models.auto.configuration_auto.CONFIG_MAPPING lowerCAmelCase_ : str = { # used to compute the property `self.chunk_length` '''EncodecConfig''': ['''overlap'''], # used as `self.bert_model = BertModel(config, ...)` '''DPRConfig''': True, # not used in modeling files, but it's an important information '''FSMTConfig''': ['''langs'''], # used internally in the configuration class file '''GPTNeoConfig''': ['''attention_types'''], # used internally in the configuration class file '''EsmConfig''': ['''is_folding_model'''], # used during training (despite we don't have training script for these models yet) '''Mask2FormerConfig''': ['''ignore_value'''], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) '''OneFormerConfig''': ['''ignore_value''', '''norm'''], # used during preprocessing and collation, see `collating_graphormer.py` '''GraphormerConfig''': ['''spatial_pos_max'''], # used internally in the configuration class file '''T5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally '''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], '''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''], # used internally in the configuration class file '''LongT5Config''': ['''feed_forward_proj'''], # used internally in the configuration class file '''SwitchTransformersConfig''': ['''feed_forward_proj'''], # having default values other than `1e-5` - we can't fix them without breaking '''BioGptConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''GLPNConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''SegformerConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''CvtConfig''': ['''layer_norm_eps'''], # having default values other than `1e-5` - we can't fix them without breaking '''PerceiverConfig''': ['''layer_norm_eps'''], # used internally to calculate the feature size '''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate the feature size '''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''], # used internally to calculate `mlp_dim` '''SamVisionConfig''': ['''mlp_ratio'''], # For (head) training, but so far not implemented '''ClapAudioConfig''': ['''num_classes'''], # Not used, but providing useful information to users '''SpeechT5HifiGanConfig''': ['''sampling_rate'''], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure 
SPECIAL_CASES_TO_ALLOW.update( { '''CLIPSegConfig''': True, '''DeformableDetrConfig''': True, '''DetaConfig''': True, '''DinatConfig''': True, '''DonutSwinConfig''': True, '''EfficientFormerConfig''': True, '''FSMTConfig''': True, '''JukeboxConfig''': True, '''LayoutLMv2Config''': True, '''MaskFormerSwinConfig''': True, '''MT5Config''': True, '''NatConfig''': True, '''OneFormerConfig''': True, '''PerceiverConfig''': True, '''RagConfig''': True, '''SpeechT5Config''': True, '''SwinConfig''': True, '''Swin2SRConfig''': True, '''Swinv2Config''': True, '''SwitchTransformersConfig''': True, '''TableTransformerConfig''': True, '''TapasConfig''': True, '''TransfoXLConfig''': True, '''UniSpeechConfig''': True, '''UniSpeechSatConfig''': True, '''WavLMConfig''': True, '''WhisperConfig''': True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) '''JukeboxPriorConfig''': True, # TODO: @Younes (for `is_decoder`) '''Pix2StructTextConfig''': True, } ) def __A ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): _UpperCAmelCase : Dict = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( f"config.{attribute}" in modeling_source or f"getattr(config, \"{attribute}\"" in modeling_source or f"getattr(self.config, \"{attribute}\"" in modeling_source ): _UpperCAmelCase : Union[str, Any] = True # Deal with multi-line cases elif ( re.search( rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , lowerCamelCase_ , ) is not None ): _UpperCAmelCase : Dict = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: _UpperCAmelCase : Dict = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files _UpperCAmelCase : List[Any] = [ 'bos_index', 'eos_index', 'pad_index', 'unk_index', 'mask_index', 'image_size', 'use_cache', 'out_features', 'out_indices', ] _UpperCAmelCase : Optional[Any] = ['encoder_no_repeat_ngram_size'] # Special cases to be allowed _UpperCAmelCase : Optional[Any] = True if not attribute_used: _UpperCAmelCase : Any = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: _UpperCAmelCase : int = True elif attribute in ["tie_word_embeddings"] and default_value is False: _UpperCAmelCase : List[str] = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: _UpperCAmelCase : Tuple = True elif attribute.endswith("""_token_id""" ): _UpperCAmelCase : Dict = True # configuration class specific cases if not case_allowed: _UpperCAmelCase : Tuple = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) _UpperCAmelCase : Optional[Any] = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def __A ( lowerCAmelCase_ ): _UpperCAmelCase : str = dict(inspect.signature(config_class.__init__ ).parameters ) _UpperCAmelCase : Optional[Any] = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']] _UpperCAmelCase : List[Any] = [signature[param].default 
for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass _UpperCAmelCase : Any = {} if len(config_class.attribute_map ) > 0: _UpperCAmelCase : Optional[int] = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files _UpperCAmelCase : Union[str, Any] = inspect.getsourcefile(lowerCamelCase_ ) _UpperCAmelCase : str = os.path.dirname(lowerCamelCase_ ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. _UpperCAmelCase : Dict = [os.path.join(lowerCamelCase_ , lowerCamelCase_ ) for fn in os.listdir(lowerCamelCase_ ) if fn.startswith("""modeling_""" )] # Get the source code strings _UpperCAmelCase : str = [] for path in modeling_paths: if os.path.isfile(lowerCamelCase_ ): with open(lowerCamelCase_ ) as fp: modeling_sources.append(fp.read() ) _UpperCAmelCase : str = [] for config_param, default_value in zip(lowerCamelCase_ , lowerCamelCase_ ): # `attributes` here is all the variant names for `config_param` _UpperCAmelCase : Dict = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): unused_attributes.append(attributes[0] ) return sorted(lowerCamelCase_ ) def __A ( ): _UpperCAmelCase : Dict = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) _UpperCAmelCase : List[Any] = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda lowerCAmelCase_ : inspect.isclass(lowerCamelCase_ ) and issubclass(lowerCamelCase_ , lowerCamelCase_ ) and inspect.getmodule(lowerCamelCase_ ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: _UpperCAmelCase : List[Any] = check_config_attributes_being_used(lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: _UpperCAmelCase : Optional[Any] = unused_attributes if len(lowerCamelCase_ ) > 0: _UpperCAmelCase : str = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n' for name, attributes in configs_with_unused_attributes.items(): error += f"{name}: {attributes}\n" raise ValueError(lowerCamelCase_ ) if __name__ == "__main__": check_config_attributes()
414
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { '''huggingface/informer-tourism-monthly''': ( '''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json''' ), # See all Informer models at https://huggingface.co/models?filter=informer } class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : List[Any] = '''informer''' __SCREAMING_SNAKE_CASE : List[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "student_t" , SCREAMING_SNAKE_CASE__ : str = "nll" , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : List[int] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : int = 6_4 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 3_2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : str = "gelu" , SCREAMING_SNAKE_CASE__ : float = 0.05 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : float = 0.1 , SCREAMING_SNAKE_CASE__ : int = 1_0_0 , SCREAMING_SNAKE_CASE__ : float = 0.02 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str = "prob" , SCREAMING_SNAKE_CASE__ : int = 5 , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Tuple , ): '''simple docstring''' __a : Dict = prediction_length __a : Tuple = context_length or prediction_length __a : Tuple = distribution_output __a : Tuple = loss __a : str = input_size __a : Dict = num_time_features __a : Optional[int] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] __a : str = scaling __a : Tuple = num_dynamic_real_features __a : int = num_static_real_features __a : Dict = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) __a : Optional[Any] = cardinality else: __a : Optional[int] = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(SCREAMING_SNAKE_CASE__ ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) __a : int = embedding_dimension else: __a : List[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] __a : int = num_parallel_samples # Transformer architecture configuration __a : str = input_size * len(self.lags_sequence ) + self._number_of_features __a : Optional[int] = d_model __a : Union[str, Any] = encoder_attention_heads __a : int = decoder_attention_heads __a : Any = encoder_ffn_dim 
__a : Union[str, Any] = decoder_ffn_dim __a : List[Any] = encoder_layers __a : Optional[int] = decoder_layers __a : int = dropout __a : Optional[Any] = attention_dropout __a : Dict = activation_dropout __a : Union[str, Any] = encoder_layerdrop __a : Optional[int] = decoder_layerdrop __a : List[str] = activation_function __a : str = init_std __a : Optional[int] = use_cache # Informer __a : Union[str, Any] = attention_type __a : str = sampling_factor __a : Dict = distil super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self : Any ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
47
0
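The attribute checker above hinges on one introspection step: reading a config class's __init__ signature to recover parameter names and defaults, which it then greps for in the modeling sources. That step in isolation, using BertConfig purely as an arbitrary example class:

import inspect

from transformers import BertConfig

signature = dict(inspect.signature(BertConfig.__init__).parameters)
parameter_names = [name for name in signature if name not in ("self", "kwargs")]
defaults = [signature[name].default for name in parameter_names]

# print the first few (name, default) pairs the checker would compare against the modeling code
for name, default in list(zip(parameter_names, defaults))[:5]:
    print(f"{name} = {default!r}")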
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) class lowercase_ ( __lowerCamelCase ): """simple docstring""" __lowerCAmelCase = ['''pixel_values'''] def __init__( self : Optional[int], UpperCamelCase__ : bool = True, UpperCamelCase__ : Dict[str, int] = None, UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC, UpperCamelCase__ : bool = True, UpperCamelCase__ : Dict[str, int] = None, UpperCamelCase__ : bool = True, UpperCamelCase__ : Union[int, float] = 1 / 2_55, UpperCamelCase__ : bool = True, UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **UpperCamelCase__ : int, ) -> int: super().__init__(**SCREAMING_SNAKE_CASE__ ) _A = size if size is not None else {'shortest_edge': 2_24} _A = get_size_dict(SCREAMING_SNAKE_CASE__, default_to_square=SCREAMING_SNAKE_CASE__ ) _A = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} _A = get_size_dict(SCREAMING_SNAKE_CASE__, param_name='crop_size' ) _A = do_resize _A = size _A = resample _A = do_center_crop _A = crop_size _A = do_rescale _A = rescale_factor _A = do_normalize _A = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _A = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : np.ndarray, UpperCamelCase__ : Dict[str, int], UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC, UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCamelCase__ : Tuple, ) -> List[str]: _A = get_size_dict(SCREAMING_SNAKE_CASE__, default_to_square=SCREAMING_SNAKE_CASE__ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _A = int((2_56 / 2_24) * size['shortest_edge'] ) _A = get_resize_output_image_size(SCREAMING_SNAKE_CASE__, size=SCREAMING_SNAKE_CASE__, default_to_square=SCREAMING_SNAKE_CASE__ ) _A = {'height': output_size[0], 'width': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' ) return resize( SCREAMING_SNAKE_CASE__, size=(size_dict['height'], size_dict['width']), resample=SCREAMING_SNAKE_CASE__, data_format=SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ) def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : np.ndarray, UpperCamelCase__ : Dict[str, int], UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCamelCase__ : int, ) -> Tuple: _A = get_size_dict(SCREAMING_SNAKE_CASE__ ) if "height" not in size or "width" not in size: raise ValueError(f'Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}' ) return center_crop(SCREAMING_SNAKE_CASE__, size=(size['height'], size['width']), data_format=SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ) def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : np.ndarray, UpperCamelCase__ : Union[int, float], UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCamelCase__ : Tuple, ) -> Union[str, Any]: return rescale(SCREAMING_SNAKE_CASE__, scale=SCREAMING_SNAKE_CASE__, data_format=SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ) def __UpperCAmelCase ( self : Any, UpperCamelCase__ : np.ndarray, UpperCamelCase__ : Union[float, List[float]], UpperCamelCase__ : Union[float, List[float]], UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None, **UpperCamelCase__ : Dict, ) -> Tuple: return normalize(SCREAMING_SNAKE_CASE__, mean=SCREAMING_SNAKE_CASE__, std=SCREAMING_SNAKE_CASE__, data_format=SCREAMING_SNAKE_CASE__, **SCREAMING_SNAKE_CASE__ ) def __UpperCAmelCase ( self : str, UpperCamelCase__ : ImageInput, UpperCamelCase__ : Optional[bool] = None, UpperCamelCase__ : Optional[Dict[str, int]] = None, UpperCamelCase__ : PILImageResampling = None, UpperCamelCase__ : Optional[bool] = None, UpperCamelCase__ : Optional[Dict[str, int]] = None, UpperCamelCase__ : Optional[bool] = None, UpperCamelCase__ : Optional[float] = None, UpperCamelCase__ : Optional[bool] = None, UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None, UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None, UpperCamelCase__ : Optional[TensorType] = None, UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST, **UpperCamelCase__ : Dict, ) -> Tuple: _A = do_resize if do_resize is not None else self.do_resize _A = resample if resample is not None else self.resample _A = do_center_crop if do_center_crop is not None else self.do_center_crop _A = do_rescale if do_rescale is not None else self.do_rescale _A = rescale_factor if rescale_factor is not None else self.rescale_factor _A = do_normalize if do_normalize is not None else self.do_normalize _A = image_mean if image_mean is not None else self.image_mean _A = image_std if image_std is not None else self.image_std _A = size if size is not None else self.size _A = get_size_dict(SCREAMING_SNAKE_CASE__, default_to_square=SCREAMING_SNAKE_CASE__ ) _A = crop_size if crop_size is not None else self.crop_size _A = get_size_dict(SCREAMING_SNAKE_CASE__, param_name='crop_size' ) _A = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
_A = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if do_resize: _A = [self.resize(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for image in images] if do_center_crop: _A = [self.center_crop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for image in images] if do_rescale: _A = [self.rescale(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for image in images] if do_normalize: _A = [self.normalize(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for image in images] _A = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for image in images] _A = {'pixel_values': images} return BatchFeature(data=SCREAMING_SNAKE_CASE__, tensor_type=SCREAMING_SNAKE_CASE__ )
107
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class _UpperCamelCase( __lowerCamelCase ): __SCREAMING_SNAKE_CASE : int = (DDIMParallelScheduler,) __SCREAMING_SNAKE_CASE : Union[str, Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50)) def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' __a : List[Any] = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0_001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**SCREAMING_SNAKE_CASE__ ) return config def __lowerCAmelCase ( self : str , **SCREAMING_SNAKE_CASE__ : Optional[int] ): '''simple docstring''' __a : Tuple = self.scheduler_classes[0] __a : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ ) __a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ ) __a , __a : List[str] = 1_0, 0.0 __a : Dict = self.dummy_model() __a : str = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) for t in scheduler.timesteps: __a : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __a : List[str] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample return sample def __lowerCAmelCase ( self : Tuple ): '''simple docstring''' for timesteps in [1_0_0, 5_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Dict ): '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = self.scheduler_classes[0] __a : List[str] = self.get_scheduler_config(steps_offset=1 ) __a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : int ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : int ): '''simple docstring''' for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : str ): '''simple docstring''' self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , ) def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' for t in [1, 1_0, 4_9]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ) def 
__lowerCAmelCase ( self : List[str] ): '''simple docstring''' for t, num_inference_steps in zip([1, 1_0, 5_0] , [1_0, 5_0, 5_0_0] ): self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' for t, eta in zip([1, 1_0, 4_9] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ ) def __lowerCAmelCase ( self : List[Any] ): '''simple docstring''' __a : List[str] = self.scheduler_classes[0] __a : Union[str, Any] = self.get_scheduler_config() __a : Any = scheduler_class(**SCREAMING_SNAKE_CASE__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 , 4_0_0 ) - 0.14_771 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 , 9_6_0 ) - 0.32_460 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 , 4_8_6 ) - 0.00_979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 , 9_9_8 ) - 0.02 ) ) < 1e-5 def __lowerCAmelCase ( self : Optional[Any] ): '''simple docstring''' __a : List[str] = self.scheduler_classes[0] __a : List[str] = self.get_scheduler_config() __a : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) __a , __a : Any = 1_0, 0.0 scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) __a : List[Any] = self.dummy_model() __a : int = self.dummy_sample_deter __a : List[Any] = self.dummy_sample_deter + 0.1 __a : List[str] = self.dummy_sample_deter - 0.1 __a : Optional[Any] = samplea.shape[0] __a : Optional[Any] = torch.stack([samplea, samplea, samplea] , dim=0 ) __a : Union[str, Any] = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ ) __a : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) __a : int = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE__ ) __a : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2 assert abs(result_mean.item() - 0.4_982 ) < 1e-3 def __lowerCAmelCase ( self : str ): '''simple docstring''' __a : List[str] = self.full_loop() __a : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 172.0_067 ) < 1e-2 assert abs(result_mean.item() - 0.223_967 ) < 1e-3 def __lowerCAmelCase ( self : Optional[int] ): '''simple docstring''' __a : Optional[int] = self.full_loop(prediction_type='v_prediction' ) __a : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 52.5_302 ) < 1e-2 assert abs(result_mean.item() - 0.0_684 ) < 1e-3 def __lowerCAmelCase ( self : int ): '''simple docstring''' __a : Union[str, Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 ) __a : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 149.8_295 ) < 1e-2 assert abs(result_mean.item() - 0.1_951 ) < 1e-3 def __lowerCAmelCase ( self : str ): '''simple docstring''' __a : Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 ) __a : str = 
torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) __a : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 149.0_784 ) < 1e-2 assert abs(result_mean.item() - 0.1_941 ) < 1e-3
47
0
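The image processor above always applies its stages in a fixed order (resize, center-crop, rescale, normalize), each gated by a do_* flag. A minimal numpy sketch of the rescale/normalize tail, assuming the ImageNet mean/std defaults and a stand-in channels-last uint8 input:

import numpy as np

IMAGENET_DEFAULT_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_DEFAULT_STD = np.array([0.229, 0.224, 0.225])

image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)  # stand-in HWC image

pixels = image.astype(np.float32) * (1 / 255)                     # rescale to [0, 1]
pixels = (pixels - IMAGENET_DEFAULT_MEAN) / IMAGENET_DEFAULT_STD  # normalize per channel
pixels = pixels.transpose(2, 0, 1)                                # HWC -> CHW (ChannelDimension.FIRST)
print(pixels.shape)  # (3, 224, 224)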
'''simple docstring''' import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class _UpperCamelCase ( __lowerCamelCase , unittest.TestCase): '''simple docstring''' pass @nightly @require_onnxruntime @require_torch_gpu class _UpperCamelCase ( unittest.TestCase): '''simple docstring''' @property def a__ ( self ) -> Any: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a__ ( self ) -> Any: lowercase : int = ort.SessionOptions() lowercase : str = False return options def a__ ( self ) -> List[str]: lowercase : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) lowercase : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) lowercase : str = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) lowercase : int = 'A red cat sitting on a park bench' lowercase : Union[str, Any] = np.random.RandomState(0 ) lowercase : Any = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=SCREAMING_SNAKE_CASE__ , output_type="np" , ) lowercase : Dict = output.images lowercase : List[str] = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) lowercase : Dict = np.array([0.25_14, 0.30_07, 0.35_17, 0.17_90, 0.23_82, 0.31_67, 0.19_44, 0.22_73, 0.24_64] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def a__ ( self ) -> Any: lowercase : List[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) lowercase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) lowercase : Optional[int] = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" ) lowercase : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = 'A red cat sitting on a park bench' lowercase : Optional[int] = np.random.RandomState(0 ) lowercase : List[str] = pipe( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=SCREAMING_SNAKE_CASE__ , output_type="np" , ) lowercase : Tuple = output.images lowercase : Union[str, Any] = images[0, 2_5_5:2_5_8, 
2_5_5:2_5_8, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) lowercase : Optional[Any] = np.array([0.00_86, 0.00_77, 0.00_83, 0.00_93, 0.01_07, 0.01_39, 0.00_94, 0.00_97, 0.01_25] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
372
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
47
0
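For a concrete call against the 2x2 Cramer solver above (using the restored name): the system 2x + 3y = 6, 5x + 8y = 10 has determinant 2*8 - 5*3 = 1, so x = (6*8 - 10*3)/1 = 18 and y = (2*10 - 5*6)/1 = -10.

solution = cramers_rule_2x2([2, 3, 6], [5, 8, 10])
print(solution)  # (18.0, -10.0)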
"""simple docstring""" from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
200
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .continous_encoder import SpectrogramContEncoder  # module name spelled this way upstream
    from .notes_encoder import SpectrogramNotesEncoder
    from .pipeline_spectrogram_diffusion import SpectrogramDiffusionPipeline, T5FilmDecoder

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
47
0
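Both files in this row are built around the same optional-dependency guard: probe for a backend, raise a sentinel exception, and substitute a fallback when it is missing. A self-contained sketch of the pattern without the transformers/diffusers plumbing; the names mirror, but are not imported from, those libraries:

import importlib.util


def is_available(package: str) -> bool:
    return importlib.util.find_spec(package) is not None


class OptionalDependencyNotAvailable(BaseException):
    """Sentinel raised when an optional backend is missing."""


try:
    if not is_available("torch"):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    torch = None  # degrade gracefully instead of failing at import time
else:
    import torch

print("torch available:", torch is not None)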
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    # Three points are collinear iff AB x AC is the zero vector
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
250
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available SCREAMING_SNAKE_CASE__ = { '''configuration_bridgetower''': [ '''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BridgeTowerConfig''', '''BridgeTowerTextConfig''', '''BridgeTowerVisionConfig''', ], '''processing_bridgetower''': ['''BridgeTowerProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ['''BridgeTowerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ '''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BridgeTowerForContrastiveLearning''', '''BridgeTowerForImageAndTextRetrieval''', '''BridgeTowerForMaskedLM''', '''BridgeTowerModel''', '''BridgeTowerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
47
0
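# --- Added usage sketch (not part of the original dataset row) ---
# A small, hedged demonstration of the collinearity helpers defined above; the
# three points are made-up illustrative values.
a = (0.0, 0.0, 0.0)
b = (1.0, 2.0, 3.0)
c = (2.0, 4.0, 6.0)  # c = 2 * b, so a, b, and c lie on one line

assert are_collinear(a, b, c)  # AB x AC is the zero vector
assert not are_collinear(a, b, (1.0, 0.0, 0.0))  # cross product is (0, 3, -2)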
"""simple docstring""" import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class UpperCAmelCase_ ( __lowerCamelCase ): UpperCamelCase ='''MCTCTFeatureExtractor''' UpperCamelCase ='''AutoTokenizer''' def __init__( self , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowercase : int = self.feature_extractor __lowercase : Union[str, Any] = False def __call__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]: if self._in_target_context_manager: return self.current_processor(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) __lowercase : Optional[Any] = kwargs.pop('''raw_speech''' ) else: __lowercase : Union[str, Any] = kwargs.pop('''audio''' , SCREAMING_SNAKE_CASE__ ) __lowercase : Tuple = kwargs.pop('''sampling_rate''' , SCREAMING_SNAKE_CASE__ ) __lowercase : Any = kwargs.pop('''text''' , SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > 0: __lowercase : List[str] = args[0] __lowercase : Tuple = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: __lowercase : Union[str, Any] = self.feature_extractor(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , sampling_rate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if text is not None: __lowercase : Union[str, Any] = self.tokenizer(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if text is None: return inputs elif audio is None: return encodings else: __lowercase : List[str] = encodings['input_ids'] return inputs def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]: return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]: if self._in_target_context_manager: return self.current_processor.pad(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) __lowercase : Dict = kwargs.pop('''input_features''' , SCREAMING_SNAKE_CASE__ ) __lowercase : Union[str, Any] = kwargs.pop('''labels''' , SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > 0: __lowercase : Any = args[0] __lowercase : Union[str, Any] = args[1:] if input_features is not None: __lowercase : Tuple = self.feature_extractor.pad(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if labels is not None: __lowercase : int = self.tokenizer.pad(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if labels is None: return input_features elif input_features is None: return labels else: __lowercase : str = labels['input_ids'] return input_features def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]: return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @contextmanager def _lowerCamelCase ( self ) -> Optional[int]: warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) __lowercase : Any = True __lowercase : str = self.tokenizer yield __lowercase : List[str] = self.feature_extractor __lowercase : Any = False
76
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
0
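# --- Added usage sketch (not part of the original dataset row) ---
# A hedged sketch of how the MCTCT processor above is typically used, assuming
# `MCTCTProcessor` is still importable from the top-level transformers package
# (the model has since been moved to the deprecated models) and that the
# "speechbrain/m-ctc-t-large" checkpoint is available; a second of silence
# stands in for a real 16 kHz waveform.
import numpy as np
from transformers import MCTCTProcessor

processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
speech = np.zeros(16_000, dtype=np.float32)

# Audio is routed to the feature extractor; text (e.g. for CTC labels) to the tokenizer.
inputs = processor(audio=speech, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="hello world", return_tensors="pt").input_ids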
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def __lowerCAmelCase ( ): '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(lowerCamelCase_ ): requests.request("""GET""" , """https://huggingface.co""" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 ) @pytest.mark.integration def __lowerCAmelCase ( ): '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("""GET""" , """https://huggingface.co""" ) def __lowerCAmelCase ( ): '''simple docstring''' with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(lowerCamelCase_ ): http_head("""https://huggingface.co""" )
58
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        '''simple docstring'''
        return functools.reduce(operator.mul, self.conv_stride, 1)
47
0
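# --- Added usage sketch (not part of the original dataset row) ---
# A hedged sketch of the SEW-D configuration above, assuming it is exposed as
# transformers.SEWDConfig; the printed values follow directly from the defaults.
from transformers import SEWDConfig

config = SEWDConfig()
print(config.num_feat_extract_layers)  # 13, i.e. len(config.conv_dim)
print(config.inputs_to_logits_ratio)   # 320, the product of conv_stride: 5 * 2**6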