"""simple docstring"""
def reverse_words(input_str: str) -> str:
    """
    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
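# i.e. replace `from image_to_image import StableDiffusionImg2ImgPipeline` with
# `from diffusers import StableDiffusionImg2ImgPipeline`.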
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ):
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        text = self.non_printing_characters_re.sub("", text)
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)
        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)
        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    """simple docstring"""
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
__A = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
"""simple docstring"""
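# A one-dimensional DP over row length (in the style of Project Euler 116):
# for each tile length t in {2, 3, 4}, column t - 2 of
# `different_colour_ways_number` accumulates, for every start position of a
# new tile, the count for the remaining row plus one for the tile itself.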
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__A = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__A = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__A = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    """simple docstring"""
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
"""simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    """simple docstring"""
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp3) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp1, self.num_bp2) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        # model traning
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort.

    >>> bubble_sort([28, 10, 9, 11])
    [9, 10, 11, 28]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
"""simple docstring"""
import numpy as np
from transformers import Pipeline
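
# Numerically stable softmax: subtracting the per-row max before
# exponentiating avoids overflow without changing the normalized result.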
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    """simple docstring"""
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def convert_command_factory( args ):
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser ):
        train_parser = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , tfds_path , datasets_directory , *args ):
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(input_file , encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
for line in lines:
                out_line = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase__: Optional[int] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
lowercase__: Dict = ''''''
continue
elif "from absl import logging" in out_line:
lowercase__: Tuple = '''from datasets import logging\n'''
elif "getLogger" in out_line:
lowercase__: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    filtered = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(filtered ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
continue
else:
for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
                    match = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    out_line = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
                dataset_name = f_name.replace('''.py''' , '''''' )
                output_dir = os.path.join(abs_datasets_path , dataset_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
                utils_files.append(output_file )
if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(utils_file , dest_folder )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 2 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['''data_utils'''] = data_utils
sys.modules['''vocabulary'''] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch( tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , '''rb''' ) as fp:
            corpus = pickle.load(fp , encoding='''latin1''' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
        print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('''vocab''' , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
        print(F"""Save dataset to {pytorch_dataset_dump_path}""" )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F"""Building PyTorch model from configuration: {config}""" )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
        with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 358 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = "cvt"
    def __init__( self , num_channels=3 , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , embed_dim=[64, 192, 384] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , mlp_ratio=[4.0, 4.0, 4.0] , attention_drop_rate=[0.0, 0.0, 0.0] , drop_rate=[0.0, 0.0, 0.0] , drop_path_rate=[0.0, 0.0, 0.1] , qkv_bias=[True, True, True] , cls_token=[False, False, True] , qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv=[3, 3, 3] , padding_kv=[1, 1, 1] , stride_kv=[2, 2, 2] , padding_q=[1, 1, 1] , stride_q=[1, 1, 1] , initializer_range=0.02 , layer_norm_eps=1e-1_2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
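        # Defaults mirror the CvT-13 variant: three stages widening from 64 to 384 channels with depths (1, 2, 10).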
| 2 | 0 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z ):
    # Logistic (sigmoid) function, mapping any real value into (0, 1).
    return 1 / (1 + np.exp(-z ))
def cost_function(h , y ):
    # Binary cross-entropy loss, averaged over all samples.
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood(x , y , weights ):
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha , x , y , max_iterations=7_0_0_0_0 ):
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 1_0_0 == 0:
            print(F"""loss: {j} \t""" )  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_0_0_0_0)
    print("theta: ", theta)  # printing the theta i.e our weights vector
    def predict_prob(x ):
        return sigmoid_function(
            np.dot(x , theta ) )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(1_0, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
| 359 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(RAG_CONFIG_DOC )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = "rag"
_UpperCAmelCase :List[Any] = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=300 , retrieval_vector_size=768 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ):
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('''question_encoder''' )
        question_encoder_model_type = question_encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''generator''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator , '''forced_eos_token_id''' , None )
@classmethod
    def from_question_encoder_generator_configs( cls , question_encoder_config , generator_config , **kwargs ):
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''question_encoder'''] = self.question_encoder.to_dict()
        output['''generator'''] = self.generator.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
| 2 | 0 |
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ('''foo.json''',)] )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__ = GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a , config_name=_a )
lowercase__ = GenerationConfig.from_pretrained(_a , config_name=_a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , _a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , _a )
def _snake_case ( self ):
lowercase__ = AutoConfig.from_pretrained('''gpt2''' )
lowercase__ = GenerationConfig.from_model_config(_a )
lowercase__ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_a , _a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def _snake_case ( self ):
lowercase__ = GenerationConfig()
lowercase__ = {
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
lowercase__ = copy.deepcopy(_a )
lowercase__ = generation_config.update(**_a )
# update_kwargs was not modified (no side effects)
self.assertEqual(_a , _a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_a , {'''foo''': '''bar'''} )
def _snake_case ( self ):
lowercase__ = GenerationConfig()
lowercase__ = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(_a )
lowercase__ = GenerationConfig.from_pretrained(_a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowercase__ = GenerationConfig.from_model_config(_a )
assert not hasattr(_a , '''foo''' ) # no new kwargs should be initialized if from config
def _snake_case ( self ):
lowercase__ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , _a )
self.assertEqual(default_config.num_beams , 1 )
lowercase__ = GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , _a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a )
lowercase__ = GenerationConfig.from_pretrained(_a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , _a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@classmethod
def _snake_case ( cls ):
lowercase__ = TOKEN
HfFolder.save_token(_a )
@classmethod
def _snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def _snake_case ( self ):
lowercase__ = GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowercase__ = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a , repo_id='''test-generation-config''' , push_to_hub=_a , use_auth_token=self._token )
lowercase__ = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
def _snake_case ( self ):
lowercase__ = GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowercase__ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=_a , use_auth_token=self._token )
lowercase__ = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
| 360 | """simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: Union[str, Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: Dict = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(os.path.isfile(_UpperCAmelCase ) )
# File is cached at the same place the second time.
lowercase__: Any = cached_file(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
# Using a specific revision to test the full commit hash.
lowercase__: Dict = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''9b8c223''' )
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
lowercase__: int = cached_file('''tiny-random-bert''' , _UpperCAmelCase )
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
lowercase__: List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''aaaa''' )
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Optional[Any] = cached_file(_UpperCAmelCase , '''conf''' )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: int = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''.no_exist''' , _UpperCAmelCase , '''conf''' ) ) )
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: List[str] = cached_file(_UpperCAmelCase , '''conf''' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: Union[str, Any] = mock.Mock()
lowercase__: str = 500
lowercase__: Union[str, Any] = {}
lowercase__: List[str] = HTTPError
lowercase__: int = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_UpperCAmelCase ) as mock_head:
lowercase__: Any = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
# This check we did call the fake head request
mock_head.assert_called()
def _snake_case ( self ):
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , FLAX_WEIGHTS_NAME ) )
def _snake_case ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , _UpperCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase , revision='''ahaha''' )
lowercase__: Optional[Any] = get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase__: Optional[Any] = json.loads(open(_UpperCAmelCase , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__: Any = Path(_UpperCAmelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , '''a.txt''' ) , str(_UpperCAmelCase ) )
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , '''b.txt''' ) )
| 2 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=10 , _UpperCAmelCase=[10, 20, 30, 40] , _UpperCAmelCase=[1, 1, 2, 1] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=3 , _UpperCAmelCase=None , ):
lowercase__: List[str] = parent
lowercase__: Tuple = batch_size
lowercase__: Optional[Any] = image_size
lowercase__: Dict = num_channels
lowercase__: List[Any] = embeddings_size
lowercase__: Any = hidden_sizes
lowercase__: Dict = depths
lowercase__: Union[str, Any] = is_training
lowercase__: str = use_labels
lowercase__: Optional[int] = hidden_act
lowercase__: Optional[Any] = num_labels
lowercase__: Tuple = scope
lowercase__: Optional[Any] = len(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__: List[str] = self.get_config()
return config, pixel_values
def _snake_case ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Union[str, Any] = FlaxRegNetModel(config=_UpperCAmelCase )
lowercase__: str = model(_UpperCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
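        # RegNet downsamples by an overall stride of 32, hence image_size // 32 for the final feature map.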
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = self.num_labels
lowercase__: Optional[Any] = FlaxRegNetForImageClassification(config=_UpperCAmelCase )
lowercase__: List[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self ):
lowercase__: int = self.prepare_config_and_inputs()
lowercase__: Tuple = config_and_inputs
lowercase__: Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_UpperCAmelCase :int = False
_UpperCAmelCase :List[str] = False
_UpperCAmelCase :str = False
def _snake_case ( self ):
lowercase__: Union[str, Any] = FlaxRegNetModelTester(self )
lowercase__: List[str] = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def _snake_case ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self ):
return
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _snake_case ( self ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Optional[int] = model_class(_UpperCAmelCase )
lowercase__: Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__: int = [*signature.parameters.keys()]
lowercase__: Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def _snake_case ( self ):
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = model_class(_UpperCAmelCase )
lowercase__: Optional[int] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
lowercase__: Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__: List[str] = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__: Tuple = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__: Tuple = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = model_class(_UpperCAmelCase )
@jax.jit
def model_jitted(_UpperCAmelCase , **_UpperCAmelCase ):
return model(pixel_values=_UpperCAmelCase , **_UpperCAmelCase )
with self.subTest('''JIT Enabled''' ):
lowercase__: str = model_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase__: Optional[int] = model_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_flax
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _snake_case ( self ):
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None
@slow
def _snake_case ( self ):
lowercase__: int = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' )
lowercase__: Union[str, Any] = self.default_image_processor
lowercase__: Dict = prepare_img()
lowercase__: Dict = image_processor(images=_UpperCAmelCase , return_tensors='''np''' )
lowercase__: List[str] = model(**_UpperCAmelCase )
# verify the logits
lowercase__: Optional[int] = (1, 1000)
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
lowercase__: Optional[Any] = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 361 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "beit"
def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Union[str, Any] = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: int = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: List[str] = initializer_range
lowercase__: Optional[int] = layer_norm_eps
lowercase__: int = image_size
lowercase__: Tuple = patch_size
lowercase__: int = num_channels
lowercase__: Optional[Any] = use_mask_token
lowercase__: List[Any] = use_absolute_position_embeddings
lowercase__: Optional[int] = use_relative_position_bias
lowercase__: Optional[int] = use_shared_relative_position_bias
lowercase__: Optional[Any] = layer_scale_init_value
lowercase__: Union[str, Any] = drop_path_rate
lowercase__: Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__: Tuple = out_indices
lowercase__: Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__: List[str] = use_auxiliary_head
lowercase__: Optional[Any] = auxiliary_loss_weight
lowercase__: str = auxiliary_channels
lowercase__: List[str] = auxiliary_num_convs
lowercase__: Tuple = auxiliary_concat_input
lowercase__: Dict = semantic_loss_ignore_index
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = version.parse("1.11" )
@property
def _snake_case ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
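    # Absolute tolerance used when validating the exported ONNX model against the PyTorch reference.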
@property
def _snake_case ( self ):
return 1e-4
| 2 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A = logging.get_logger(__name__)
if is_vision_available():
import PIL
class UpperCAmelCase (__lowercase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = ['''pixel_values''']
def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Optional[int] = size if size is not None else {"""shortest_edge""": 224}
lowercase__: int = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
lowercase__: int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowercase__: Dict = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase , param_name='''crop_size''' )
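        # "shortest_edge" resizing keeps the aspect ratio; the square center crop then produces the final fixed size.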
lowercase__: Dict = do_resize
lowercase__: Tuple = size
lowercase__: Union[str, Any] = resample
lowercase__: Optional[Any] = do_center_crop
lowercase__: List[str] = crop_size
lowercase__: Dict = do_rescale
lowercase__: Optional[Any] = rescale_factor
lowercase__: int = do_normalize
lowercase__: Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase__: List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase__: List[str] = do_convert_rgb
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BICUBIC , _UpperCAmelCase = None , **_UpperCAmelCase , ):
lowercase__: Optional[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowercase__: Dict = get_resize_output_image_size(_UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCAmelCase )
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
lowercase__: Any = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
lowercase__: Tuple = do_resize if do_resize is not None else self.do_resize
lowercase__: Union[str, Any] = size if size is not None else self.size
lowercase__: Tuple = get_size_dict(_UpperCAmelCase , param_name='''size''' , default_to_square=_UpperCAmelCase )
lowercase__: Optional[Any] = resample if resample is not None else self.resample
lowercase__: Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__: List[str] = crop_size if crop_size is not None else self.crop_size
lowercase__: Union[str, Any] = get_size_dict(_UpperCAmelCase , param_name='''crop_size''' , default_to_square=_UpperCAmelCase )
lowercase__: Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__: int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__: List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase__: Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase__: Any = image_std if image_std is not None else self.image_std
lowercase__: int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase__: Optional[int] = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase__: int = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowercase__: List[str] = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
lowercase__: Optional[int] = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_center_crop:
lowercase__: Optional[Any] = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase ) for image in images]
if do_rescale:
lowercase__: Optional[int] = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
lowercase__: Tuple = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
lowercase__: Tuple = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
lowercase__: int = {"""pixel_values""": images}
return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
| 362 | """simple docstring"""
def join(separator , separated ) -> str:
    """
    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join("#", ["a", "b", "c", "d"])
    'a#b#c#d'
    """
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator
    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 2 | 0 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ):
    # Materializes the masks learned during movement pruning and saves a standard dense checkpoint.
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('''/''' )
    target_model_path = args.target_model_path
    print(F"""Load fine-pruned model from {model_name_or_path}""" )
    model = torch.load(os.path.join(model_name_or_path , '''pytorch_model.bin''' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            else:
                raise ValueError('''Unknown pruning method''' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , F"""bertarized_{os.path.basename(model_name_or_path )}""" )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(F"""\nCreated folder {target_model_path}""" )
    torch.save(pruned_model , os.path.join(target_model_path , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
| 363 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
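# Full determinism so that the hard-coded pixel-slice expectations below are reproducible across runs.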
@skip_mps
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = StableDiffusionPanoramaPipeline
_UpperCAmelCase :List[str] = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
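        # Deliberately tiny sub-models keep this pipeline test fast enough to run on CPU.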
lowercase__: List[Any] = DDIMScheduler()
torch.manual_seed(0 )
lowercase__: Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__: Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__: List[str] = CLIPTextModel(_UpperCAmelCase )
lowercase__: int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__: int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
lowercase__: int = torch.manual_seed(_UpperCAmelCase )
lowercase__: List[Any] = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[str] = self.get_dummy_components()
lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = '''french fries'''
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
lowercase__: Optional[Any] = output.images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 )
lowercase__: List[str] = output.images
lowercase__: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: int = self.get_dummy_components()
lowercase__: List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[Any] = self.get_dummy_components()
lowercase__: Any = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase )
lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _UpperCAmelCase=0 ):
lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
lowercase__: int = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Any = '''stabilityai/stable-diffusion-2-base'''
lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase )
lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: List[str] = self.get_inputs()
lowercase__: Dict = pipe(**_UpperCAmelCase ).images
lowercase__: Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _snake_case ( self ):
        number_of_steps = 0
def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
            callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__: Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Any = latents[0, -3:, -3:, -1]
lowercase__: List[Any] = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase__: Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Optional[Any] = latents[0, -3:, -3:, -1]
lowercase__: Any = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
        callback_fn.has_been_called = False
lowercase__: str = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: List[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__: Any = self.get_inputs()
lowercase__: List[str] = pipe(**_UpperCAmelCase )
lowercase__: Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
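# Usage sketch (added for illustration, not part of the test suite; the checkpoint
# id and the wide 512x2048 default canvas simply mirror the slow tests above).
def _panorama_usage_example() -> None:
    model_ckpt = "stabilityai/stable-diffusion-2-base"
    scheduler = DDIMScheduler.from_pretrained(model_ckpt , subfolder='''scheduler''' )
    pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt , scheduler=scheduler , safety_checker=None )
    pipe = pipe.to('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    image = pipe('''a photo of the dolomites''' , generator=torch.manual_seed(0 ) ).images[0]
    image.save('''dolomites_panorama.png''' )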
| 2 | 0 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ) -> list:
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
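# Worked example of the parameterization above (not executed; for reading only):
# with_config=True yields entries such as
#   {"testcase_name": "wikipedia/20220301.de", "dataset": "wikipedia", "config_name": "20220301.de"}
# while with_config=False collapses to one entry per unique dataset name,
# e.g. {"testcase_name": "wikipedia", "dataset": "wikipedia"}.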
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class UpperCAmelCase (TestCase ):
    """simple docstring"""

    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = '''/'''.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , '''/''' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ) -> None:
    tmp_dir = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
    dataset_module = dataset_module_factory('''wikipedia''' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs( tmp_path ) -> None:
    dataset_module = dataset_module_factory('''wikipedia''' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['''train'''] , IterableDataset )
    assert next(iter(ds['''train'''] ) )
| 364 | """simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (TokenizerTesterMixin ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = DebertaVaTokenizer
_UpperCAmelCase :Tuple = DebertaVaTokenizerFast
_UpperCAmelCase :int = True
_UpperCAmelCase :int = True
    def setUp( self ):
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(__A , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = '''this is a test'''
lowercase__: int = '''this is a test'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: Optional[int] = '''<pad>'''
lowercase__: Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_UpperCAmelCase ) , 30001 )
def _snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case ( self ):
# fmt: off
lowercase__: int = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
        tokenizer = DebertaVaTokenizer(__A , do_lower_case=True )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        rust_tokenizer = DebertaVaTokenizerFast(__A , do_lower_case=True )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# fmt: off
lowercase__: Dict = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
        tokenizer = DebertaVaTokenizer(__A , split_by_punct=True )
lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        rust_tokenizer = DebertaVaTokenizerFast(__A , split_by_punct=True )
lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Any = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
        tokenizer = DebertaVaTokenizer(__A , do_lower_case=True , split_by_punct=True )
lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        rust_tokenizer = DebertaVaTokenizerFast(__A , do_lower_case=True , split_by_punct=True )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
        tokenizer = DebertaVaTokenizer(__A , do_lower_case=True , split_by_punct=False )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        rust_tokenizer = DebertaVaTokenizerFast(__A , do_lower_case=True , split_by_punct=False )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.'''
lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
        tokenizer = DebertaVaTokenizer(__A , do_lower_case=False , split_by_punct=True )
lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        rust_tokenizer = DebertaVaTokenizerFast(__A , do_lower_case=False , split_by_punct=True )
lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
        tokenizer = DebertaVaTokenizer(__A , do_lower_case=False , split_by_punct=False )
lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        rust_tokenizer = DebertaVaTokenizerFast(__A , do_lower_case=False , split_by_punct=False )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.get_tokenizer()
lowercase__: List[Any] = self.get_rust_tokenizer()
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.get_rust_tokenizer()
lowercase__: str = tokenizer.encode(_UpperCAmelCase )
lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''This is a test'''
lowercase__: str = [13, 1, 4398, 25, 21, 1289]
lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
        tokenizer = DebertaVaTokenizer(__A , keep_accents=True )
        rust_tokenizer = DebertaVaTokenizerFast(__A , keep_accents=True )
lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# fmt: off
lowercase__: str = '''I was born in 92000, and this is falsé.'''
lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
        tokenizer = DebertaVaTokenizer(__A )
lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' )
lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' )
lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , )
@slow
def _snake_case ( self ):
# fmt: off
lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
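# Round-trip sketch using the same SentencePiece fixture as the tests above
# (illustrative only; plain ASCII input is expected to survive encode/decode).
def _deberta_v2_roundtrip_example() -> None:
    tokenizer = DebertaVaTokenizer(__A , unk_token='''<unk>''' )
    ids = tokenizer.encode('''this is a test''' , add_special_tokens=False )
    print(tokenizer.convert_ids_to_tokens(ids ) )
    print(tokenizer.decode(ids ) )  # expected: "this is a test"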
| 2 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig (PretrainedConfig ):
    """simple docstring"""

    model_type = "blip_2_vision_model"
    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1e-5 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('''model_type''' ) == "blip-2":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BlipaQFormerConfig (PretrainedConfig ):
    """simple docstring"""

    model_type = "blip_2_qformer"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('''model_type''' ) == "blip-2":
            config_dict = config_dict['''qformer_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class BlipaConfig (PretrainedConfig ):
    """simple docstring"""

    model_type = "blip-2"
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the Blip2VisionConfig with default values.''' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('''qformer_config is None. Initializing the Blip2QFormerConfig with default values.''' )
        if text_config is None:
            text_config = {}
            logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config , qformer_config , text_config , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''qformer_config'''] = self.qformer_config.to_dict()
        output['''text_config'''] = self.text_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
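# Construction sketch (illustrative; uses only names defined or imported above).
if __name__ == "__main__":
    blip2_config = BlipaConfig.from_vision_qformer_text_configs(
        vision_config=BlipaVisionConfig() , qformer_config=BlipaQFormerConfig() , text_config=CONFIG_MAPPING['''opt''']() , )
    print(blip2_config.to_dict()['''model_type'''] )  # -> "blip-2"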
| 365 | """simple docstring"""
import unittest
from transformers import DonutProcessor
__A = "naver-clova-ix/donut-base"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.processor = DonutProcessor.from_pretrained(__A )
    def test_token2json( self ):
        expected_json = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        sequence = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
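    # Descriptive note (not executed): token2json walks the <s_key>...</s_key>
    # spans recursively, so the XML-like sequence decodes back into the nested
    # dict, with <sep/> delimiting repeated fields such as the two nicknames.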
| 2 | 0 |
def reverse_long_words( sentence: str ) -> str:
    """
    Reverse all words longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1] ) if len(word ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 366 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__A = logging.get_logger(__name__)
class VideoMAEFeatureExtractor (VideoMAEImageProcessor ):
    """simple docstring"""

    def __init__( self , *args , **kwargs ):
        warnings.warn(
            '''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use VideoMAEImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
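# Migration sketch (illustrative; the checkpoint id below is an assumption):
#     from transformers import VideoMAEImageProcessor
#     image_processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")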
| 2 | 0 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks( args ) -> None:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
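# Worked example of the constraints above (not executed): an MLM distillation run
#   --mlm --alpha_mlm 2.0 --alpha_clm 0.0 --student_type distilbert --teacher_type bert
# passes, while a CLM run must instead set --alpha_clm > 0 with --alpha_mlm 0 and pair gpt2 with gpt2.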
def freeze_pos_embeddings( student , args ) -> None:
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings( student , args ) -> None:
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main() -> None:
    parser = argparse.ArgumentParser(description='''Training''' )
    parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
    parser.add_argument(
        '''--dump_path''' , type=str , required=True , help='''The output directory (log, checkpoints, parameters, etc.)''' )
    parser.add_argument(
        '''--data_file''' , type=str , required=True , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
    parser.add_argument(
        '''--student_type''' , type=str , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=True , help='''The student type (DistilBERT, RoBERTa).''' , )
    parser.add_argument('''--student_config''' , type=str , required=True , help='''Path to the student configuration.''' )
    parser.add_argument(
        '''--student_pretrained_weights''' , default=None , type=str , help='''Load student initialization checkpoint.''' )
    parser.add_argument(
        '''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=True , help='''Teacher type (BERT, RoBERTa).''' )
    parser.add_argument('''--teacher_name''' , type=str , required=True , help='''The teacher model.''' )
    parser.add_argument('''--temperature''' , default=2.0 , type=float , help='''Temperature for the softmax temperature.''' )
    parser.add_argument(
        '''--alpha_ce''' , default=0.5 , type=float , help='''Linear weight for the distillation loss. Must be >=0.''' )
    parser.add_argument(
        '''--alpha_mlm''' , default=0.0 , type=float , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
    parser.add_argument('''--alpha_clm''' , default=0.5 , type=float , help='''Linear weight for the CLM loss. Must be >=0.''' )
    parser.add_argument('''--alpha_mse''' , default=0.0 , type=float , help='''Linear weight of the MSE loss. Must be >=0.''' )
    parser.add_argument(
        '''--alpha_cos''' , default=0.0 , type=float , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
    parser.add_argument(
        '''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
    parser.add_argument(
        '''--mlm_mask_prop''' , default=0.15 , type=float , help='''Proportion of tokens for which we need to make a prediction.''' , )
    parser.add_argument('''--word_mask''' , default=0.8 , type=float , help='''Proportion of tokens to mask out.''' )
    parser.add_argument('''--word_keep''' , default=0.1 , type=float , help='''Proportion of tokens to keep.''' )
    parser.add_argument('''--word_rand''' , default=0.1 , type=float , help='''Proportion of tokens to randomly replace.''' )
    parser.add_argument(
        '''--mlm_smoothing''' , default=0.7 , type=float , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
    parser.add_argument('''--token_counts''' , type=str , help='''The token counts in the data_file for MLM.''' )
    parser.add_argument(
        '''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
    parser.add_argument(
        '''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.''' , )
    parser.add_argument(
        '''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.''' , )
    parser.add_argument('''--n_epoch''' , type=int , default=3 , help='''Number of pass on the whole dataset.''' )
    parser.add_argument('''--batch_size''' , type=int , default=5 , help='''Batch size (for each process).''' )
    parser.add_argument(
        '''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
    parser.add_argument(
        '''--gradient_accumulation_steps''' , type=int , default=50 , help='''Gradient accumulation for larger training batches.''' , )
    parser.add_argument('''--warmup_prop''' , default=0.05 , type=float , help='''Linear warmup proportion.''' )
    parser.add_argument('''--weight_decay''' , default=0.0 , type=float , help='''Weight decay if we apply some.''' )
    parser.add_argument('''--learning_rate''' , default=5e-4 , type=float , help='''The initial learning rate for Adam.''' )
    parser.add_argument('''--adam_epsilon''' , default=1e-6 , type=float , help='''Epsilon for Adam optimizer.''' )
    parser.add_argument('''--max_grad_norm''' , default=5.0 , type=float , help='''Max gradient norm.''' )
    parser.add_argument('''--initializer_range''' , default=0.02 , type=float , help='''Random initialization range.''' )
    parser.add_argument(
        '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
    parser.add_argument(
        '''--fp16_opt_level''' , type=str , default='''O1''' , help=(
            '''For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].'''
            '''See details at https://nvidia.github.io/apex/amp.html'''
        ) , )
    parser.add_argument('''--n_gpu''' , type=int , default=1 , help='''Number of GPUs in the node.''' )
    parser.add_argument('''--local_rank''' , type=int , default=-1 , help='''Distributed training - Local rank''' )
    parser.add_argument('''--seed''' , type=int , default=56 , help='''Random seed''' )
    parser.add_argument('''--log_interval''' , type=int , default=500 , help='''Tensorboard logging interval.''' )
    parser.add_argument('''--checkpoint_interval''' , type=int , default=4000 , help='''Checkpoint interval.''' )
    args = parser.parse_args()
    sanity_checks(args )
    # ARGS #
    init_gpu_params(args )
    set_seed(args )
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    F"""Serialization dir {args.dump_path} already exists, but you have not specified whether"""
                    ''' to overwrite it. Use `--force` if you want to overwrite it.''' )
            else:
                shutil.rmtree(args.dump_path )
        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(F"""Experiment will be dumped and logged in {args.dump_path}""" )
        # SAVE PARAMS #
        logger.info(F"""Param: {args}""" )
        with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
            json.dump(vars(args ) , f , indent=4 )
        git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(F"""Special tokens {special_tok_ids}""" )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(F"""Loading data from {args.data_file}""" )
    with open(args.data_file , '''rb''' ) as fp:
        data = pickle.load(fp )
    if args.mlm:
        logger.info(F"""Loading token counts from {args.token_counts} (already pre-computed)""" )
        with open(args.token_counts , '''rb''' ) as fp:
            counts = pickle.load(fp )
        token_probs = np.maximum(counts , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data )
    logger.info('''Data loader created.''' )
    # STUDENT #
    logger.info(F"""Loading student config from {args.student_config}""" )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(F"""Loading pretrained weights from {args.student_pretrained_weights}""" )
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
    if args.n_gpu > 0:
        student.to(F"""cuda:{args.local_rank}""" )
    logger.info('''Student loaded.''' )
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True )
    if args.n_gpu > 0:
        teacher.to(F"""cuda:{args.local_rank}""" )
    logger.info(F"""Teacher loaded from {args.teacher_name}.""" )
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0 ) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher )
    distiller.train()
    logger.info('''Let's go get some drinks.''' )
if __name__ == "__main__":
    main()
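# Example invocation (paths are hypothetical; flags mirror the parser built in main() above):
#   python train.py \
#     --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#     --dump_path serialization_dir/my_first_distillation \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle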
| 367 | """simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
__A = 2_5_6
class SpectrogramDiffusionPipeline (DiffusionPipeline ):
    """simple docstring"""

    _optional_components = ["melgan"]
    def __init__( self , notes_encoder , continuous_encoder , decoder , scheduler , melgan , ):
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False ):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False ):
        min_out, max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self , input_tokens , continuous_inputs , continuous_mask ):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time ):
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
@torch.no_grad()
    def __call__( self , input_tokens , generator=None , num_inference_steps=100 , return_dict=True , output_type='''numpy''' , callback=None , callback_steps=1 , ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(callback_steps )}.""" )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator ).prev_sample
            mel = self.scale_to_features(x , input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel )
            logger.info('''Generated segment''' , i )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.''' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.''' )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output )
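# Usage sketch (illustrative; the checkpoint id is an assumption, and turning MIDI
# into note-token chunks happens outside this file):
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     output = pipe(processed_midi_tokens)  # one chunk of note tokens per segment
#     audio = output.audios[0]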
| 2 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase (PipelineLatentTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = StableDiffusionSAGPipeline
_UpperCAmelCase :Dict = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase :Tuple = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase :Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase :int = False
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase__: int = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
torch.manual_seed(0 )
lowercase__: str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__: Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__: Union[str, Any] = CLIPTextModel(_lowerCamelCase )
lowercase__: str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__: Union[str, Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
if str(_lowerCamelCase ).startswith('''mps''' ):
lowercase__: List[Any] = torch.manual_seed(_lowerCamelCase )
else:
lowercase__: List[Any] = torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
lowercase__: str = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_1( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def test_stable_diffusion_2( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
    def test_stable_diffusion_2_non_square( self ):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sag_pipe = sag_pipe.to(torch_device )
        sag_pipe.set_progress_bar_config(disable=None )
        prompt = '''.'''
        generator = torch.manual_seed(0 )
        output = sag_pipe(
            [prompt] , width=768 , height=512 , generator=generator , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
        image = output.images
        assert image.shape == (1, 512, 768, 3)
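
# A minimal usage sketch (not part of the test suite) showing how the SAG
# pipeline exercised above is typically driven; the checkpoint, prompt and
# output path are only illustrative, and a CUDA device is assumed:
#
#     import torch
#     from diffusers import StableDiffusionSAGPipeline
#
#     pipe = StableDiffusionSAGPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
#     ).to("cuda")
#     # sag_scale > 0 enables self-attention guidance on top of the usual
#     # classifier-free guidance; sag_scale=0.0 falls back to plain sampling.
#     image = pipe("a photo of an astronaut", sag_scale=0.75, guidance_scale=7.5).images[0]
#     image.save("sag.png")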
| 368 | """simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__A = logging.get_logger(__name__)
__A = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class UpperCAmelCase (PretrainedConfig ):
"""simple docstring"""
_UpperCAmelCase :str = "bloom"
_UpperCAmelCase :List[str] = ["past_key_values"]
_UpperCAmelCase :Optional[Any] = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
    def __init__( self , vocab_size=250880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class UpperCAmelCase (OnnxConfigWithPast ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.12" )
    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , '''pad_token_id''' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ):
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' , inverted_values_shape=True )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def num_layers( self ):
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        return self._config.n_head
    @property
    def atol_for_validation( self ):
        return 1e-3
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch, seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ):
        return 13
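
# Consumption sketch for the ONNX config above (hedged: this assumes the
# legacy `transformers.onnx.export` helper, which takes a preprocessor, a
# model, an OnnxConfig instance, an opset and an output path; the checkpoint
# and output file name are placeholders):
#
#     from pathlib import Path
#     from transformers import AutoModel, AutoTokenizer
#     from transformers.onnx import export
#
#     model = AutoModel.from_pretrained("bigscience/bloom-560m")
#     tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
#     onnx_config = UpperCAmelCase(model.config, task="default")  # the ONNX config class above
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("bloom.onnx"))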
| 2 | 0 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__A = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS ,R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " ,)
class UpperCAmelCase (Pipeline ):
"""simple docstring"""
    def get_masked_index( self , input_ids ):
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False )
        else:
            raise ValueError('''Unsupported framework''' )
        return masked_index
    def _ensure_exactly_one_mask_token( self , input_ids ):
        masked_index = self.get_masked_index(input_ids )
        numel = np.prod(masked_index.shape )
        if numel < 1:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
    def ensure_exactly_one_mask_token( self , model_inputs ):
        if isinstance(model_inputs , list ):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids )
    def preprocess( self , inputs , return_tensors=None , **preprocess_parameters ):
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors )
        self.ensure_exactly_one_mask_token(model_inputs )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        model_outputs['''input_ids'''] = model_inputs['''input_ids''']
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 , target_ids=None ):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs['''input_ids'''][0]
        outputs = model_outputs['''logits''']
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits , axis=-1 )
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs , 0 ) , target_ids.reshape(-1 , 1 ) )
                probs = tf.expand_dims(probs , 0 )
            topk = tf.math.top_k(probs , k=top_k )
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=False ).squeeze(-1 )
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1 )
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k )
        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
            row = []
            for v, p in zip(_values , _predictions ):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens , skip_special_tokens=single_mask )
                proposition = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
                row.append(proposition )
            result.append(row )
        if single_mask:
            return result[0]
        return result
    def get_target_ids( self , targets , top_k=None ):
        if isinstance(targets , str ):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target , None )
            if id_ is None:
                input_ids = self.tokenizer(
                    target , add_special_tokens=False , return_attention_mask=False , return_token_type_ids=False , max_length=1 , truncation=True , )['''input_ids''']
                if len(input_ids ) == 0:
                    logger.warning(
                        F"""The specified target token `{target}` does not exist in the model vocabulary. """
                        '''We cannot replace it with anything meaningful, ignoring it''' )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    F"""The specified target token `{target}` does not exist in the model vocabulary. """
                    F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
            target_ids.append(id_ )
        target_ids = list(set(target_ids ) )
        if len(target_ids ) == 0:
            raise ValueError('''At least one target must be provided when passed.''' )
        target_ids = np.array(target_ids )
        return target_ids
    def _sanitize_parameters( self , top_k=None , targets=None ):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets , top_k )
            postprocess_params['''target_ids'''] = target_ids
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                '''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
        return {}, {}, postprocess_params
    def __call__( self , inputs , *args , **kwargs ):
        outputs = super().__call__(inputs , **kwargs )
        if isinstance(inputs , list ) and len(inputs ) == 1:
            return outputs[0]
        return outputs
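
# A quick usage sketch for the pipeline above (the checkpoint is only an
# example; any masked-LM checkpoint whose tokenizer defines a mask token works):
#
#     from transformers import pipeline
#
#     unmasker = pipeline("fill-mask", model="distilroberta-base")
#     unmasker("The capital of France is <mask>.", top_k=3)
#     # -> three dicts with "score", "token", "token_str" and "sequence" keys,
#     #    matching the rows built in postprocess() above.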
| 369 | """simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
lowercase__: Dict = parent
lowercase__: Optional[int] = batch_size
lowercase__: List[str] = seq_length
lowercase__: Optional[int] = is_training
lowercase__: Dict = use_input_mask
lowercase__: List[Any] = use_token_type_ids
lowercase__: List[str] = use_labels
lowercase__: Union[str, Any] = vocab_size
lowercase__: str = hidden_size
lowercase__: Any = embedding_size
lowercase__: Any = num_hidden_layers
lowercase__: Any = num_attention_heads
lowercase__: List[Any] = intermediate_size
lowercase__: Dict = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: Optional[int] = max_position_embeddings
lowercase__: List[Any] = type_vocab_size
lowercase__: Tuple = type_sequence_label_size
lowercase__: Optional[int] = initializer_range
lowercase__: Dict = num_labels
lowercase__: int = num_choices
lowercase__: int = scope
def _snake_case ( self ):
lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: List[Any] = None
if self.use_input_mask:
lowercase__: Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: List[Any] = None
if self.use_token_type_ids:
lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__: Optional[Any] = None
lowercase__: Any = None
lowercase__: str = None
if self.use_labels:
lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: int = MobileBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: Dict = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: str = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[Any] = MobileBertForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: str = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: int = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Any = MobileBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Union[str, Any] = MobileBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = self.num_choices
lowercase__: Union[str, Any] = MobileBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase (ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Optional[Any] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :Optional[Any] = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def _snake_case ( self ):
lowercase__: int = MobileBertModelTester(self )
lowercase__: Dict = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_inference_no_head( self ):
        model = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound )
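
# A standalone sketch of the ratio-based comparison used above: when values
# span many orders of magnitude, a relative bound is checked instead of an
# absolute difference (the numbers below are made up for illustration):
#
#     import torch
#     expected = torch.tensor([1.0e8, 2.0, 3.0e-1])
#     actual = torch.tensor([1.00001e8, 2.0000001, 3.0000001e-1])
#     ratio = expected / actual
#     assert torch.all(ratio >= 1 - 1e-3) and torch.all(ratio <= 1 + 1e-3)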
| 2 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
__A = logging.get_logger(__name__)
def shape_list(tensor ) -> List[int]:
    if isinstance(tensor , np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
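
# Illustrative only: inside a graph, dimensions that are unknown at trace time
# come back from shape_list as scalar Tensors while statically known ones stay
# Python ints, so downstream code can mix them freely, e.g.
#
#     x = tf.keras.Input(shape=(None, 64))   # dynamic sequence length
#     seq_len, hidden = shape_list(x)[1:]    # -> [<tf.Tensor>, 64]
#     y = tf.reshape(x, (-1, hidden))        # the static value is usable directly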
def stable_softmax(logits , axis = None , name = None ) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1e-9 , axis=axis , name=name )
def functional_layernorm(inputs , weight , bias , epsilon=1e-5 , axis=-1 ) -> tf.Tensor:
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis , int ):
        raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' )
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs , axes=[axis] , keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight , shape )
        bias = tf.reshape(bias , shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs , mean , variance , offset=bias , scale=weight , variance_epsilon=epsilon , )
    return outputs
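
# A small equivalence sketch (assumption: this mirrors
# torch.nn.functional.layer_norm over the last axis):
#
#     x = tf.random.normal((2, 5, 8))
#     w = tf.ones((8,))    # scale, like PyTorch's `weight`
#     b = tf.zeros((8,))   # shift, like PyTorch's `bias`
#     y = functional_layernorm(x, w, b, epsilon=1e-5, axis=-1)
#     # y is normalized to ~zero mean / unit variance along the last axis,
#     # then scaled and shifted.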
def flatten(input , start_dim=0 , end_dim=-1 ) -> tf.Tensor:
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
    return tf.reshape(input , out_shape )
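
# Behavior sketch, matching torch.flatten semantics:
#
#     t = tf.zeros((2, 3, 4, 5))
#     flatten(t).shape                          # (120,)
#     flatten(t, start_dim=1).shape             # (2, 60)
#     flatten(t, start_dim=1, end_dim=2).shape  # (2, 12, 5)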
def invert_attention_mask(encoder_attention_mask ) -> tf.Tensor:
    if not isinstance(encoder_attention_mask , tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask )  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = "input_ids" ) -> None:
tf.debugging.assert_less(
UpperCAmelCase__ , tf.cast(UpperCAmelCase__ , dtype=tensor.dtype ) , message=(
F"""The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase__ )}) must be smaller than the embedding """
F"""layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def save_attributes_to_hdf5_group(group , name , data ):
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            '''The following attributes cannot be saved to HDF5 file because '''
            F"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
            F"""bytes: {bad_attributes}""" )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy , num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy , num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs['''%s%d''' % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group , name ) -> List[str]:
    if name in group.attrs:
        data = [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode('''utf8''' ) if hasattr(n , '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] )
            chunk_id += 1
    return data
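
# Round-trip sketch with h5py (illustrative; "weights.h5" and the attribute
# values are placeholders):
#
#     import h5py
#     with h5py.File("weights.h5", "w") as f:
#         g = f.create_group("layer_0")
#         save_attributes_to_hdf5_group(g, "weight_names", ["kernel:0", "bias:0"])
#     with h5py.File("weights.h5", "r") as f:
#         load_attributes_from_hdf5_group(f["layer_0"], "weight_names")
#     # -> ["kernel:0", "bias:0"], reassembled from chunks if the list was large.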
def expand_1d(data ):
    def _expand_single_1d_tensor(t ):
        if isinstance(t , tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t , axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor , data )
| 370 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase (PretrainedConfig ):
"""simple docstring"""
_UpperCAmelCase :Any = "unispeech-sat"
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
lowercase__: Union[str, Any] = hidden_size
lowercase__: Union[str, Any] = feat_extract_norm
lowercase__: Any = feat_extract_activation
lowercase__: List[Any] = list(_UpperCAmelCase )
lowercase__: Optional[int] = list(_UpperCAmelCase )
lowercase__: int = list(_UpperCAmelCase )
lowercase__: Any = conv_bias
lowercase__: List[str] = num_conv_pos_embeddings
lowercase__: List[str] = num_conv_pos_embedding_groups
lowercase__: int = len(self.conv_dim )
lowercase__: Dict = num_hidden_layers
lowercase__: List[Any] = intermediate_size
lowercase__: Dict = hidden_act
lowercase__: Optional[Any] = num_attention_heads
lowercase__: Union[str, Any] = hidden_dropout
lowercase__: List[Any] = attention_dropout
lowercase__: str = activation_dropout
lowercase__: Optional[Any] = feat_proj_dropout
lowercase__: Optional[int] = final_dropout
lowercase__: Any = layerdrop
lowercase__: int = layer_norm_eps
lowercase__: Any = initializer_range
lowercase__: Union[str, Any] = vocab_size
lowercase__: Optional[Any] = num_clusters
lowercase__: Dict = do_stable_layer_norm
lowercase__: List[str] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__: Dict = apply_spec_augment
lowercase__: Union[str, Any] = mask_time_prob
lowercase__: List[str] = mask_time_length
lowercase__: Union[str, Any] = mask_time_min_masks
lowercase__: str = mask_feature_prob
lowercase__: Dict = mask_feature_length
lowercase__: List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__: Tuple = num_codevectors_per_group
lowercase__: Optional[Any] = num_codevector_groups
lowercase__: int = contrastive_logits_temperature
lowercase__: Any = feat_quantizer_dropout
lowercase__: int = num_negatives
lowercase__: Optional[Any] = codevector_dim
lowercase__: int = proj_codevector_dim
lowercase__: str = diversity_loss_weight
# ctc loss
lowercase__: int = ctc_loss_reduction
lowercase__: Union[str, Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__: Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__: Union[str, Any] = list(_UpperCAmelCase )
lowercase__: Tuple = list(_UpperCAmelCase )
lowercase__: Union[str, Any] = list(_UpperCAmelCase )
lowercase__: Tuple = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
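
# Sketch: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the property
# above evaluates to 5*2*2*2*2*2*2 = 320, i.e. one encoder frame per 320
# input samples (20 ms of audio at a 16 kHz sampling rate):
#
#     import functools, operator
#     functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)  # -> 320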
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
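
# Consumer-side sketch: thanks to _LazyModule, importing from the package root
# only resolves the heavy modeling submodules on first attribute access
# (assumes torch is installed for the PyTorch classes):
#
#     from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration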
| 371 | """simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
__A = parser.parse_args()
__A = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
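
# Example invocation (sketch; the script name, checkpoint, config and output
# paths are placeholders):
#
#     python convert_original_stable_diffusion_to_diffusers.py \
#         --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#         --original_config_file ./v1-inference.yaml \
#         --scheduler_type ddim \
#         --extract_ema \
#         --dump_path ./stable-diffusion-v1-5-diffusers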
| 2 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class UpperCAmelCase (PretrainedConfig ):
    """simple docstring"""
    model_type = """lxmert"""
    attribute_map = {}
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ):
lowercase__: Tuple = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: Tuple = num_attention_heads
lowercase__: Optional[int] = hidden_act
lowercase__: List[str] = intermediate_size
lowercase__: Optional[Any] = hidden_dropout_prob
lowercase__: List[str] = attention_probs_dropout_prob
lowercase__: List[str] = max_position_embeddings
lowercase__: Union[str, Any] = type_vocab_size
lowercase__: Tuple = initializer_range
lowercase__: Optional[Any] = layer_norm_eps
lowercase__: Tuple = num_qa_labels
lowercase__: Optional[Any] = num_object_labels
lowercase__: List[Any] = num_attr_labels
lowercase__: Optional[Any] = l_layers
lowercase__: Tuple = x_layers
lowercase__: Tuple = r_layers
lowercase__: Union[str, Any] = visual_feat_dim
lowercase__: Optional[Any] = visual_pos_dim
lowercase__: Union[str, Any] = visual_loss_normalizer
lowercase__: Optional[Any] = task_matched
lowercase__: Tuple = task_mask_lm
lowercase__: int = task_obj_predict
lowercase__: str = task_qa
lowercase__: str = visual_obj_loss
lowercase__: Union[str, Any] = visual_attr_loss
lowercase__: Any = visual_feat_loss
lowercase__: Dict = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**_UpperCAmelCase )
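
# Sketch: the per-modality depths are exposed through the
# `num_hidden_layers` dict built above (defaults shown; the attribute names
# follow the upstream LxmertConfig):
#
#     config = UpperCAmelCase()
#     config.num_hidden_layers
#     # -> {"vision": 5, "cross_encoder": 5, "language": 9}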
| 350 | """simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 2 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase (object ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase="None" , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
lowercase__: Dict = parent
lowercase__: Dict = batch_size
lowercase__: Optional[int] = seq_length
lowercase__: Union[str, Any] = is_training
lowercase__: int = use_input_mask
lowercase__: int = use_token_type_ids
lowercase__: Tuple = use_labels
lowercase__: Any = vocab_size
lowercase__: Union[str, Any] = hidden_size
lowercase__: Optional[Any] = num_hidden_layers
lowercase__: Tuple = num_attention_heads
lowercase__: Optional[int] = intermediate_size
lowercase__: List[Any] = hidden_act
lowercase__: Tuple = hidden_dropout_prob
lowercase__: Any = attention_probs_dropout_prob
lowercase__: Any = max_position_embeddings
lowercase__: Union[str, Any] = type_vocab_size
lowercase__: Optional[int] = type_sequence_label_size
lowercase__: int = initializer_range
lowercase__: Dict = num_labels
lowercase__: List[str] = num_choices
lowercase__: int = relative_attention
lowercase__: List[Any] = position_biased_input
lowercase__: Dict = pos_att_type
lowercase__: Optional[Any] = scope
def _snake_case ( self ):
lowercase__: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: str = None
if self.use_input_mask:
lowercase__: Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowercase__: Dict = None
if self.use_token_type_ids:
lowercase__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__: Any = None
lowercase__: int = None
lowercase__: Tuple = None
if self.use_labels:
lowercase__: int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output( self , result ):
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = DebertaModel(config=a_ )
model.to(a_ )
model.eval()
lowercase__: Optional[Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ )[0]
lowercase__: str = model(a_ , token_type_ids=a_ )[0]
lowercase__: Dict = model(a_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[int] = DebertaForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
lowercase__: List[Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = self.num_labels
lowercase__: Union[str, Any] = DebertaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
lowercase__: Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a_ )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: int = self.num_labels
lowercase__: str = DebertaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
lowercase__: Union[str, Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = DebertaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
lowercase__: Any = model(
a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase (ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :List[Any] = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :Optional[int] = True
_UpperCAmelCase :Union[str, Any] = False
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :Union[str, Any] = False
_UpperCAmelCase :Dict = False
def _snake_case ( self ):
lowercase__: List[str] = DebertaModelTester(self )
lowercase__: List[str] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a_ )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a_ )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a_ )
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a_ )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a_ )
@slow
def _snake_case ( self ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__: Tuple = DebertaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def _snake_case ( self ):
pass
@slow
def _snake_case ( self ):
lowercase__: Any = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
lowercase__: str = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
lowercase__: Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__: Optional[int] = model(a_ , attention_mask=a_ )[0]
# compare the actual values for a slice.
lowercase__: int = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1e-4 ) , F"""{output[:, 1:4, 1:4]}""" )
| 351 | """simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :List[str] = "codegen"
_UpperCAmelCase :Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ):
lowercase__: int = vocab_size
lowercase__: str = n_ctx
lowercase__: List[Any] = n_positions
lowercase__: Union[str, Any] = n_embd
lowercase__: Optional[Any] = n_layer
lowercase__: str = n_head
lowercase__: List[Any] = n_inner
lowercase__: Union[str, Any] = rotary_dim
lowercase__: Optional[Any] = activation_function
lowercase__: Union[str, Any] = resid_pdrop
lowercase__: Optional[int] = embd_pdrop
lowercase__: Optional[Any] = attn_pdrop
lowercase__: Optional[int] = layer_norm_epsilon
lowercase__: List[Any] = initializer_range
lowercase__: Tuple = use_cache
lowercase__: Any = bos_token_id
lowercase__: Any = eos_token_id
super().__init__(
bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ):
# TODO: how to do that better?
lowercase__: Any = 0
@property
def _snake_case ( self ):
lowercase__: int = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' )
lowercase__: int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
lowercase__: Tuple = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _snake_case ( self ):
return self._config.n_layer
@property
def _snake_case ( self ):
return self._config.n_head
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
lowercase__: Optional[int] = super(_UpperCAmelCase , self ).generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
        # We need to order the inputs in the way they appear in forward()
lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase__, lowercase__: Union[str, Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase__: Any = seqlen + 2
lowercase__: List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase__: Optional[Any] = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
lowercase__: Optional[Any] = common_inputs['''attention_mask''']
if self.use_past:
lowercase__: List[str] = ordered_inputs['''attention_mask'''].dtype
lowercase__: List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self ):
return 13
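# Illustrative sketch of exercising the ONNX config above; the checkpoint name
# is a placeholder and the class is referred to by its assumed original name,
# CodeGenOnnxConfig:
#
#   from transformers import AutoConfig, AutoTokenizer
#   config = AutoConfig.from_pretrained("Salesforce/codegen-350M-mono")
#   onnx_config = CodeGenOnnxConfig(config, task="default", use_past=True)
#   tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   dummy = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )
#   print(list(dummy.keys()))  # input_ids, past_key_values..., attention_mask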
| 2 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: List[str] = tempfile.mkdtemp()
lowercase__: Optional[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
lowercase__: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowercase__: List[str] = {
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
'do_convert_rgb': True,
}
lowercase__: Dict = os.path.join(self.tmpdirname , snake_case__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(snake_case__ , snake_case__ )
def _snake_case ( self , **_UpperCAmelCase ):
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _snake_case ( self , **_UpperCAmelCase ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ )
def _snake_case ( self , **_UpperCAmelCase ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ )
def _snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ):
lowercase__: Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__: Dict = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ):
lowercase__: Tuple = self.get_tokenizer()
lowercase__: Any = self.get_rust_tokenizer()
lowercase__: Optional[int] = self.get_image_processor()
lowercase__: str = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__: Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ )
lowercase__: List[Any] = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__: Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , snake_case__ )
self.assertIsInstance(processor_fast.tokenizer , snake_case__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , snake_case__ )
self.assertIsInstance(processor_fast.image_processor , snake_case__ )
def _snake_case ( self ):
lowercase__: Any = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__: Dict = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowercase__: int = self.get_image_processor(do_normalize=snake_case__ )
lowercase__: Union[str, Any] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=snake_case__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case__ )
def _snake_case ( self ):
lowercase__: Any = self.get_image_processor()
lowercase__: Optional[Any] = self.get_tokenizer()
lowercase__: List[Any] = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__: str = self.prepare_image_inputs()
lowercase__: Dict = image_processor(snake_case__ , return_tensors='''np''' )
lowercase__: Tuple = processor(images=snake_case__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.get_image_processor()
lowercase__: List[Any] = self.get_tokenizer()
lowercase__: Any = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__: List[Any] = 'Alexandra,T-shirt的价格是15便士。'
lowercase__: List[Any] = processor(text=snake_case__ )
lowercase__: List[Any] = tokenizer(snake_case__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self ):
lowercase__: str = self.get_image_processor()
lowercase__: Tuple = self.get_tokenizer()
lowercase__: Optional[Any] = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__: int = 'Alexandra,T-shirt的价格是15便士。'
lowercase__: List[str] = self.prepare_image_inputs()
lowercase__: Union[str, Any] = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(snake_case__ ):
processor()
def _snake_case ( self ):
lowercase__: int = self.get_image_processor()
lowercase__: Optional[Any] = self.get_tokenizer()
lowercase__: int = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__: int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__: str = processor.batch_decode(snake_case__ )
lowercase__: Optional[Any] = tokenizer.batch_decode(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
def _snake_case ( self ):
lowercase__: Union[str, Any] = self.get_image_processor()
lowercase__: Optional[Any] = self.get_tokenizer()
lowercase__: Optional[int] = ChineseCLIPProcessor(tokenizer=snake_case__ , image_processor=snake_case__ )
lowercase__: str = 'Alexandra,T-shirt的价格是15便士。'
lowercase__: Tuple = self.prepare_image_inputs()
lowercase__: List[Any] = processor(text=snake_case__ , images=snake_case__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 352 | """simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :str = field(
metadata={"help": "The output directory where the model will be written."} ,)
_UpperCAmelCase :str = field(
metadata={
"help": (
"The encoder model checkpoint for weights initialization."
"Don't set if you want to train an encoder model from scratch."
)
} ,)
_UpperCAmelCase :str = field(
metadata={
"help": (
"The decoder model checkpoint for weights initialization."
"Don't set if you want to train a decoder model from scratch."
)
} ,)
_UpperCAmelCase :Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
_UpperCAmelCase :Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
lowercase__: Dict = HfArgumentParser((ModelArguments,) )
((lowercase__), ): List[str] = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
lowercase__: List[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
lowercase__: int = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
lowercase__: str = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
lowercase__: Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
lowercase__: Tuple = True
lowercase__: int = True
lowercase__: Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
lowercase__: int = decoder_config.decoder_start_token_id
lowercase__: Tuple = decoder_config.pad_token_id
if decoder_start_token_id is None:
lowercase__: Tuple = decoder_config.bos_token_id
if pad_token_id is None:
lowercase__: Optional[int] = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
lowercase__: Optional[Any] = decoder_config.eos_token_id
lowercase__: Tuple = decoder_start_token_id
lowercase__: Dict = pad_token_id
lowercase__: Optional[int] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
lowercase__: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
lowercase__: Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
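# Illustrative invocation; the script filename and checkpoint names are
# placeholders:
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2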
| 2 | 0 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__A = logging.get_logger(__name__)
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ["audio_values", "audio_mask"]
def __init__( self , _UpperCAmelCase=2048 , _UpperCAmelCase=1 , _UpperCAmelCase=[16, 16] , _UpperCAmelCase=128 , _UpperCAmelCase=44100 , _UpperCAmelCase=86 , _UpperCAmelCase=2048 , _UpperCAmelCase=0.0 , **_UpperCAmelCase , ):
super().__init__(
feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , **__UpperCAmelCase , )
lowercase__: List[Any] = spectrogram_length
lowercase__: int = num_channels
lowercase__: Optional[int] = patch_size
lowercase__: Tuple = feature_size // self.patch_size[1]
lowercase__: Any = n_fft
lowercase__: List[Any] = sampling_rate // hop_length_to_sampling_rate
lowercase__: Tuple = sampling_rate
lowercase__: Union[str, Any] = padding_value
lowercase__: List[str] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__UpperCAmelCase , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=__UpperCAmelCase , norm='''slaney''' , mel_scale='''slaney''' , ).T
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Tuple = spectrogram(
__UpperCAmelCase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
lowercase__: Optional[int] = log_spec[:, :-1]
lowercase__: str = log_spec - 20.0
lowercase__: Any = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , **_UpperCAmelCase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase__: Dict = isinstance(__UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowercase__: Any = is_batched_numpy or (
isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__: Optional[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ):
lowercase__: int = np.asarray(__UpperCAmelCase , dtype=np.floataa )
elif isinstance(__UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__: Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__: Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowercase__: str = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __UpperCAmelCase ):
lowercase__: Optional[Any] = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowercase__: int = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowercase__: Optional[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowercase__: int = np.array(__UpperCAmelCase ).astype(np.floataa )
# convert into correct format for padding
lowercase__: Any = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowercase__: List[str] = np.ones([len(__UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowercase__: Dict = padded_audio_features * self.padding_value
for i in range(len(__UpperCAmelCase ) ):
lowercase__: Optional[Any] = audio_features[i]
lowercase__: List[str] = feature
# return as BatchFeature
if return_attention_mask:
lowercase__: List[str] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
lowercase__: Union[str, Any] = {'''audio_values''': padded_audio_features}
lowercase__: Tuple = BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
return encoded_inputs
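# Illustrative sketch, assuming the class keeps its original name
# TvltFeatureExtractor; one second of random mono audio at the extractor's
# default 44.1 kHz sampling rate stands in for real input:
#
#   import numpy as np
#   extractor = TvltFeatureExtractor()
#   speech = np.random.randn(44_100).astype(np.float32)
#   batch = extractor(speech, sampling_rate=44_100, return_attention_mask=True)
#   print(batch["audio_values"].shape, batch["audio_mask"].shape)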
| 353 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "ctrl"
_UpperCAmelCase :int = ["past_key_values"]
_UpperCAmelCase :Dict = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
lowercase__: Union[str, Any] = vocab_size
lowercase__: Optional[int] = n_positions
lowercase__: Optional[int] = n_embd
lowercase__: Any = n_layer
lowercase__: Any = n_head
lowercase__: int = dff
lowercase__: Dict = resid_pdrop
lowercase__: Any = embd_pdrop
lowercase__: Any = layer_norm_epsilon
lowercase__: Optional[int] = initializer_range
lowercase__: Dict = use_cache
super().__init__(**_UpperCAmelCase )
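# Illustrative sketch of the attribute_map above, assuming the class keeps its
# original name CTRLConfig: the canonical attribute names alias CTRL's own, so
# `hidden_size` transparently resolves to `n_embd`.
#
#   config = CTRLConfig(n_embd=1280, n_layer=48)
#   assert config.hidden_size == config.n_embd == 1280
#   assert config.num_hidden_layers == config.n_layer == 48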
| 2 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 354 | """simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 5_0 ) -> int:
lowercase__: str = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
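# Independent cross-check of the count above via the well-known Project Euler
# 117 recurrence (this assumes solution() implements that problem): a row of
# length n starts with either a grey unit square or a coloured tile of length
# 2, 3 or 4, so f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4), f(0) = 1, f(k<0) = 0.
from functools import lru_cache


@lru_cache(maxsize=None)
def tiling_ways(n: int) -> int:
    """Ways to tile a 1 x n row with grey unit squares and 2/3/4-unit tiles."""
    if n < 0:
        return 0
    if n == 0:
        return 1
    return sum(tiling_ways(n - k) for k in (1, 2, 3, 4))


# The problem statement gives exactly 15 tilings for a row of length five.
assert tiling_ways(5) == 15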
| 2 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__A = "\nHuman: <<task>>\n\nAssistant: "
__A = "huggingface-tools/default-prompts"
__A = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="run" ) -> Any:
if prompt_or_repo_id is None:
lowercase__: int = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' , lowercase__ ) is not None:
return prompt_or_repo_id
lowercase__: Optional[Any] = cached_file(
lowercase__ , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
with open(lowercase__ , '''r''' , encoding='''utf-8''' ) as f:
return f.read()
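# Illustrative sketch, assuming the function keeps its original name
# download_prompt: None falls back to the default prompt repo, a string
# containing whitespace is treated as an inline prompt and returned as-is,
# and anything else is fetched as a dataset repo ID.
#
#   template = download_prompt(None, "MyAgent", mode="run")
#   inline = download_prompt("Human: <<task>>\n\nAssistant: ", "MyAgent")
#   assert inline.startswith("Human:")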
| 355 | """simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.2 , _UpperCAmelCase=0.2 ):
lowercase__: int = bp_numa
lowercase__: Union[str, Any] = bp_numa
lowercase__: List[str] = bp_numa
lowercase__: str = conva_get[:2]
lowercase__: Union[str, Any] = conva_get[2]
lowercase__: Any = size_pa
lowercase__: Optional[Any] = rate_w
lowercase__: Tuple = rate_t
lowercase__: List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase__: Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__: str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__: Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1
lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1
def _snake_case ( self , _UpperCAmelCase ):
# save model dict with pickle
lowercase__: int = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_UpperCAmelCase , '''wb''' ) as f:
pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"""Model saved: {save_path}""" )
@classmethod
def _snake_case ( cls , _UpperCAmelCase ):
# read saved model
with open(_UpperCAmelCase , '''rb''' ) as f:
lowercase__: Optional[int] = pickle.load(_UpperCAmelCase ) # noqa: S301
lowercase__: Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
lowercase__: Any = model_dic.get('''size_pooling1''' )
lowercase__: int = model_dic.get('''num_bp1''' )
lowercase__: Optional[int] = model_dic.get('''num_bp2''' )
lowercase__: str = model_dic.get('''num_bp3''' )
lowercase__: Any = model_dic.get('''rate_weight''' )
lowercase__: Union[str, Any] = model_dic.get('''rate_thre''' )
# create model instance
lowercase__: str = CNN(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# modify model parameter
lowercase__: Dict = model_dic.get('''w_conv1''' )
lowercase__: Dict = model_dic.get('''wkj''' )
lowercase__: str = model_dic.get('''vji''' )
lowercase__: List[Any] = model_dic.get('''thre_conv1''' )
lowercase__: Optional[int] = model_dic.get('''thre_bp2''' )
lowercase__: Tuple = model_dic.get('''thre_bp3''' )
return conv_ins
def _snake_case ( self , _UpperCAmelCase ):
return 1 / (1 + np.exp(-1 * x ))
def _snake_case ( self , _UpperCAmelCase ):
return round(_UpperCAmelCase , 3 )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# convolution process
lowercase__: Any = convs[0]
lowercase__: Tuple = convs[1]
lowercase__: List[Any] = np.shape(_UpperCAmelCase )[0]
        # slice the original image data into convolution windows (data_focus)
lowercase__: List[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
lowercase__: Tuple = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_UpperCAmelCase )
        # calculate the feature map of every kernel and save them as a list of matrices
lowercase__: Optional[int] = []
lowercase__: Optional[int] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_UpperCAmelCase ):
lowercase__: str = []
for i_focus in range(len(_UpperCAmelCase ) ):
lowercase__: Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_UpperCAmelCase ) )
lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(
_UpperCAmelCase , _UpperCAmelCase )
data_featuremap.append(_UpperCAmelCase )
        # expand the data slices to one dimension
lowercase__: Union[str, Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) )
lowercase__: Any = np.asarray(_UpperCAmelCase )
return focus_list, data_featuremap
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="average_pool" ):
# pooling process
lowercase__: List[Any] = len(featuremaps[0] )
lowercase__: Any = int(size_map / size_pooling )
lowercase__: List[Any] = []
for i_map in range(len(_UpperCAmelCase ) ):
lowercase__: Any = featuremaps[i_map]
lowercase__: Tuple = []
for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_UpperCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_UpperCAmelCase ) )
lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase )
featuremap_pooled.append(_UpperCAmelCase )
return featuremap_pooled
def _snake_case ( self , _UpperCAmelCase ):
        # expand three-dimensional data into a one-dimensional list
lowercase__: Optional[Any] = []
for i in range(len(_UpperCAmelCase ) ):
lowercase__: Any = np.shape(data[i] )
lowercase__: List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
lowercase__: List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(_UpperCAmelCase )
lowercase__: List[str] = np.asarray(_UpperCAmelCase )
return data_expanded
def _snake_case ( self , _UpperCAmelCase ):
        # expand a matrix into a one-dimensional list
lowercase__: Union[str, Any] = np.asarray(_UpperCAmelCase )
lowercase__: List[str] = np.shape(_UpperCAmelCase )
lowercase__: List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = []
lowercase__: List[str] = 0
for i_map in range(_UpperCAmelCase ):
lowercase__: Union[str, Any] = np.ones((size_map, size_map) )
for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = pd_pool[
i_pool
]
lowercase__: List[Any] = i_pool + 1
lowercase__: str = np.multiply(
_UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_UpperCAmelCase )
return pd_all
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=bool ):
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_UpperCAmelCase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_UpperCAmelCase )) )
lowercase__: Tuple = 0
lowercase__: Tuple = []
lowercase__: Optional[int] = 10000
while rp < n_repeat and mse >= error_accuracy:
lowercase__: Tuple = 0
print(F"""-------------Learning Time {rp}--------------""" )
for p in range(len(_UpperCAmelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase__: List[Any] = np.asmatrix(datas_train[p] )
lowercase__: Optional[int] = np.asarray(datas_teach[p] )
lowercase__, lowercase__: List[str] = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: Optional[int] = self.pooling(_UpperCAmelCase , self.size_poolinga )
lowercase__: int = np.shape(_UpperCAmelCase )
lowercase__: Optional[Any] = self._expand(_UpperCAmelCase )
lowercase__: Any = data_bp_input
lowercase__: Any = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa
lowercase__: str = self.sig(_UpperCAmelCase )
lowercase__: Optional[Any] = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa
lowercase__: Dict = self.sig(_UpperCAmelCase )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
lowercase__: str = np.multiply(
(data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
lowercase__: str = np.multiply(
np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
lowercase__: Dict = np.dot(_UpperCAmelCase , self.vji )
lowercase__: Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase__: List[str] = pd_conva_pooled.T.getA().tolist()
lowercase__: Optional[Any] = self._calculate_gradient_from_pool(
_UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase__: str = self._expand_mat(pd_conva_all[k_conv] )
lowercase__: str = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase__: List[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layer
lowercase__: Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase__: List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase__: List[str] = self.thre_bpa - pd_k_all * self.rate_thre
lowercase__: Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the summed error over each single image
lowercase__: Optional[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase__: str = rp + 1
lowercase__: Optional[Any] = error_count / patterns
all_mse.append(_UpperCAmelCase )
def draw_error():
lowercase__: Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_UpperCAmelCase , '''+-''' )
plt.plot(_UpperCAmelCase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_UpperCAmelCase , alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def _snake_case ( self , _UpperCAmelCase ):
        # model prediction
lowercase__: Union[str, Any] = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_UpperCAmelCase )) )
for p in range(len(_UpperCAmelCase ) ):
lowercase__: Union[str, Any] = np.asmatrix(datas_test[p] )
lowercase__, lowercase__: Any = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: List[str] = self.pooling(_UpperCAmelCase , self.size_poolinga )
lowercase__: str = self._expand(_UpperCAmelCase )
lowercase__: List[Any] = data_bp_input
lowercase__: List[str] = bp_outa * self.vji.T - self.thre_bpa
lowercase__: Any = self.sig(_UpperCAmelCase )
lowercase__: Optional[int] = bp_outa * self.wkj.T - self.thre_bpa
lowercase__: Any = self.sig(_UpperCAmelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase__: str = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out]
return np.asarray(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
        # return the image data after the convolution process so it can be inspected
lowercase__: int = np.asmatrix(_UpperCAmelCase )
lowercase__, lowercase__: Optional[int] = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: List[Any] = self.pooling(_UpperCAmelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
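# Illustrative sketch, assuming the original TheAlgorithms signature
# CNN(conv1_get=[kernel_size, n_kernels, conv_step], size_p1, bp_num1,
# bp_num2, bp_num3, rate_w, rate_t) and method names train/predict. With 6x6
# inputs, a 3x3 kernel, stride 1 and 2x2 average pooling, each of the two
# feature maps pools to 2x2, so the first BP layer sees 2 * 2 * 2 = 8 inputs:
#
#   import numpy as np
#   cnn = CNN([3, 2, 1], 2, 8, 6, 2, 0.2, 0.2)
#   images = [np.random.rand(6, 6) for _ in range(4)]
#   labels = [np.random.randint(0, 2, 2) for _ in range(4)]
#   cnn.train(4, images, labels, 50, 0.5, False)
#   cnn.predict(images[:1])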
| 2 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
__A = {
"gpt-neox-20b": 2_0_4_8,
}
class UpperCAmelCase (__lowerCamelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = VOCAB_FILES_NAMES
_UpperCAmelCase :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase=False , **_UpperCAmelCase , ):
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , )
lowercase__: str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space:
lowercase__: Optional[Any] = getattr(UpperCamelCase_ , pre_tok_state.pop('''type''' ) )
lowercase__: List[str] = add_prefix_space
lowercase__: Dict = pre_tok_class(**UpperCamelCase_ )
lowercase__: Optional[Any] = add_prefix_space
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
lowercase__: Tuple = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: str = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) + [self.eos_token_id] )
if len(UpperCamelCase_ ) > self.model_max_length:
lowercase__: str = input_ids[-self.model_max_length :]
return input_ids
| 356 | """simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = CTRLTokenizer
_UpperCAmelCase :Any = False
_UpperCAmelCase :List[Any] = False
def _snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''}
lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_UpperCAmelCase ) )
def _snake_case ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Optional[int] = '''adapt react readapt apt'''
lowercase__: Optional[int] = '''adapt react readapt apt'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__: Optional[int] = '''adapt react readapt apt'''
lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = tokens + [tokenizer.unk_token]
lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
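# Note: CTRL's BPE marks every non-final subword piece with a trailing "@@",
# so "react" tokenizes to "re@@ a@@ c@@ t" under the toy merges above, and any
# token missing from the vocabulary maps to "<unk>" (id 6), which is what the
# final assertion checks.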
| 2 | 0 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__A = logging.get_logger(__name__)
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :str
_UpperCAmelCase :str = None
@staticmethod
def _snake_case ( ):
raise NotImplementedError
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
raise NotImplementedError
def _snake_case ( self , _UpperCAmelCase ):
raise NotImplementedError
def _snake_case ( self ):
if not self.is_available():
raise RuntimeError(
F"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def _snake_case ( cls ):
return F"""`pip install {cls.pip_package or cls.name}`"""
class UpperCAmelCase (lowerCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase :Any = 'optuna'
@staticmethod
def _snake_case ( ):
return is_optuna_available()
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return run_hp_search_optuna(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
return default_hp_space_optuna(__lowerCAmelCase )
class UpperCAmelCase (lowerCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = 'ray'
_UpperCAmelCase :Dict = '\'ray[tune]\''
@staticmethod
def _snake_case ( ):
return is_ray_available()
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return run_hp_search_ray(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
return default_hp_space_ray(__lowerCAmelCase )
class UpperCAmelCase (lowerCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = 'sigopt'
@staticmethod
def _snake_case ( ):
return is_sigopt_available()
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return run_hp_search_sigopt(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
return default_hp_space_sigopt(__lowerCAmelCase )
class UpperCAmelCase (lowerCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = 'wandb'
@staticmethod
def _snake_case ( ):
return is_wandb_available()
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return run_hp_search_wandb(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
return default_hp_space_wandb(__lowerCAmelCase )
__A = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
lowercase__: List[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(__UpperCAmelCase ) > 0:
lowercase__: Union[str, Any] = available_backends[0].name
if len(__UpperCAmelCase ) > 1:
logger.info(
F"""{len(__UpperCAmelCase )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
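# Illustrative sketch: these backends are normally reached through
# Trainer.hyperparameter_search; the search space below is a placeholder for
# the Optuna backend:
#
#   def optuna_hp_space(trial):
#       return {"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True)}
#
#   best_run = trainer.hyperparameter_search(
#       hp_space=optuna_hp_space,
#       backend="optuna",  # or "ray" / "sigopt" / "wandb"
#       n_trials=20,
#   )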
| 357 | """simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
__A = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def _snake_case ( _UpperCAmelCase ):
lowercase__: int = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_UpperCAmelCase )
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase ):
lowercase__: List[str] = get_logger('''datasets-cli/converting''' )
lowercase__: Optional[Any] = tfds_path
lowercase__: Dict = datasets_directory
def _snake_case ( self ):
if os.path.isdir(self._tfds_path ):
lowercase__: Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase__: Optional[int] = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
lowercase__: int = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
lowercase__: Tuple = []
lowercase__: Dict = []
lowercase__: Any = {}
if os.path.isdir(self._tfds_path ):
lowercase__: Dict = os.listdir(_UpperCAmelCase )
else:
lowercase__: Dict = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
lowercase__: Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isfile(_UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_UpperCAmelCase , encoding='''utf-8''' ) as f:
lowercase__: Tuple = f.readlines()
lowercase__: Optional[Any] = []
lowercase__: Dict = False
lowercase__: List[str] = False
lowercase__: List[Any] = []
for line in lines:
lowercase__: List[str] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase__: Optional[int] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
lowercase__: Dict = ''''''
continue
elif "from absl import logging" in out_line:
lowercase__: Tuple = '''from datasets import logging\n'''
elif "getLogger" in out_line:
lowercase__: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase__: Any = True
lowercase__: str = list(filter(lambda _UpperCAmelCase : e in out_line , _UpperCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_UpperCAmelCase ) + '''\n''' )
out_lines.append(_UpperCAmelCase )
out_lines.append(_UpperCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase__: List[Any] = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase__: Any = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _UpperCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
lowercase__: List[str] = '''from . import ''' + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase__: Optional[Any] = True
out_lines.append(_UpperCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase__: Dict = f_name.replace('''.py''' , '''''' )
lowercase__: Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_UpperCAmelCase )
if needs_manual_update:
with_manual_update.append(_UpperCAmelCase )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(_UpperCAmelCase )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
lowercase__: str = os.path.basename(_UpperCAmelCase )
lowercase__: Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(_UpperCAmelCase , _UpperCAmelCase )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 2 | 0 |
import math
import os
import sys
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str:
lowercase__: Any = ''''''
try:
with open(_UpperCamelCase , '''rb''' ) as binary_file:
lowercase__: List[Any] = binary_file.read()
for dat in data:
lowercase__: Union[str, Any] = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> None:
lexicon.pop(_UpperCamelCase )
lowercase__: Optional[Any] = last_match_id
if math.loga(_UpperCamelCase ).is_integer():
for curr_key in lexicon:
lowercase__: Dict = '''0''' + lexicon[curr_key]
lowercase__: Dict = bin(_UpperCamelCase )[2:]
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str:
lowercase__: List[Any] = {'''0''': '''0''', '''1''': '''1'''}
lowercase__, lowercase__: int = '''''', ''''''
lowercase__: Dict = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase__: str = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
index += 1
lowercase__: Tuple = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
lowercase__: Optional[int] = lexicon[curr_string]
result += last_match_id
return result
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowercase__: Tuple = os.path.getsize(_UpperCamelCase )
lowercase__: int = bin(_UpperCamelCase )[2:]
lowercase__: List[str] = len(_UpperCamelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> None:
lowercase__: List[str] = 8
try:
with open(_UpperCamelCase , '''wb''' ) as opened_file:
lowercase__: Optional[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> None:
lowercase__: List[str] = read_file_binary(_UpperCamelCase )
lowercase__: List[str] = compress_data(_UpperCamelCase )
lowercase__: List[Any] = add_file_length(_UpperCamelCase , _UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
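# For intuition, a simplified, self-contained classic-LZW compressor over a
# bit string. Note this is NOT the exact scheme above: the file uses a
# prefix-splitting variant (add_key_to_lexicon, here obfuscated) that replaces
# each matched pattern with its two one-bit extensions and rebalances code
# lengths.
def lzw_compress_bits(data_bits: str) -> list:
    lexicon = {"0": "0", "1": "1"}  # seed dictionary: the two single bits
    result, curr = [], ""
    next_code = 2
    for bit in data_bits:
        curr += bit
        if curr in lexicon:
            continue  # keep extending the current match
        result.append(lexicon[curr[:-1]])  # emit the longest known prefix
        lexicon[curr] = bin(next_code)[2:]  # register the new pattern
        next_code += 1
        curr = bit
    if curr:
        result.append(lexicon[curr])
    return result


# Example: lzw_compress_bits("1010") == ['1', '0', '10']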
| 358 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = "cvt"
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Dict = num_channels
lowercase__: str = patch_sizes
lowercase__: Optional[Any] = patch_stride
lowercase__: List[str] = patch_padding
lowercase__: Optional[Any] = embed_dim
lowercase__: Optional[int] = num_heads
lowercase__: Any = depth
lowercase__: str = mlp_ratio
lowercase__: Any = attention_drop_rate
lowercase__: Any = drop_rate
lowercase__: Optional[Any] = drop_path_rate
lowercase__: Dict = qkv_bias
lowercase__: Dict = cls_token
lowercase__: Any = qkv_projection_method
lowercase__: List[str] = kernel_qkv
lowercase__: Union[str, Any] = padding_kv
lowercase__: Optional[int] = stride_kv
lowercase__: int = padding_q
lowercase__: Dict = stride_q
lowercase__: Any = initializer_range
lowercase__: Union[str, Any] = layer_norm_eps
| 2 | 0 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__A = threading.Lock()
__A = None
__A = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__A = logging.WARNING
__A = True
def SCREAMING_SNAKE_CASE__ ( ) -> int:
lowercase__: Any = os.getenv('''TRANSFORMERS_VERBOSITY''' , lowerCAmelCase__ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
return _default_log_level
def SCREAMING_SNAKE_CASE__ ( ) -> str:
return __name__.split('''.''' )[0]
def SCREAMING_SNAKE_CASE__ ( ) -> logging.Logger:
return logging.getLogger(_get_library_name() )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
lowercase__: List[Any] = logging.StreamHandler() # Set sys.stderr as stream.
lowercase__: Optional[int] = sys.stderr.flush
# Apply our default configuration to the library root logger.
lowercase__: Union[str, Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
lowercase__: List[str] = False
def SCREAMING_SNAKE_CASE__ ( ) -> None:
global _default_handler
with _lock:
if not _default_handler:
return
lowercase__: Optional[Any] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
lowercase__: Optional[int] = None
def SCREAMING_SNAKE_CASE__ ( ) -> str:
return log_levels
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = None ) -> logging.Logger:
if name is None:
lowercase__: Optional[Any] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( ) -> int:
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> None:
_configure_library_root_logger()
_get_library_root_logger().setLevel(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
return set_verbosity(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
return set_verbosity(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( ) -> str:
return set_verbosity(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( ) -> str:
return set_verbosity(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> None:
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> None:
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
_configure_library_root_logger()
lowercase__: Dict = False
def SCREAMING_SNAKE_CASE__ ( ) -> None:
_configure_library_root_logger()
lowercase__: Union[str, Any] = True
def SCREAMING_SNAKE_CASE__ ( ) -> None:
lowercase__: str = _get_library_root_logger().handlers
for handler in handlers:
lowercase__: List[Any] = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
lowercase__: Dict = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
lowercase__: Optional[Any] = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , lowerCAmelCase__ )
if no_advisory_warnings:
return
self.warning(*lowerCAmelCase__ , **lowerCAmelCase__ )
__A = warning_advice
@functools.lru_cache(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
self.warning(*lowerCAmelCase__ , **lowerCAmelCase__ )
__A = warning_once
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ): # pylint: disable=unused-argument
lowercase__: List[str] = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
def __getattr__( self , _UpperCAmelCase ):
def empty_fn(*_UpperCAmelCase , **_UpperCAmelCase ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
return self
def __exit__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
return
class UpperCAmelCase :
"""simple docstring"""
def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
if _tqdm_active:
return tqdm_lib.tqdm(*__lowerCAmelCase , **__lowerCAmelCase )
else:
return EmptyTqdm(*__lowerCAmelCase , **__lowerCAmelCase )
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
lowercase__: str = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__lowerCAmelCase , **__lowerCAmelCase )
def _snake_case ( self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__A = _tqdm_cls()
def SCREAMING_SNAKE_CASE__ ( ) -> bool:
global _tqdm_active
return bool(_tqdm_active )
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
global _tqdm_active
lowercase__: Optional[int] = True
hf_hub_utils.enable_progress_bars()
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
global _tqdm_active
lowercase__: Dict = False
hf_hub_utils.disable_progress_bars()
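# Minimal usage sketch (assumption: this file de-mangles to
# transformers.utils.logging, whose public helpers correspond to the
# functions above):
#
#     from transformers.utils import logging
#
#     logging.set_verbosity(logging.INFO)  # TRANSFORMERS_VERBOSITY=info works too
#     logger = logging.get_logger(__name__)
#     logger.warning_once("emitted a single time thanks to functools.lru_cache")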
| 359 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = "rag"
_UpperCAmelCase :List[Any] = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=" / " , _UpperCAmelCase=" // " , _UpperCAmelCase=5 , _UpperCAmelCase=300 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase="wiki_dpr" , _UpperCAmelCase="train" , _UpperCAmelCase="compressed" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(
bos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , prefix=_UpperCAmelCase , vocab_size=_UpperCAmelCase , **_UpperCAmelCase , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
lowercase__: Optional[Any] = kwargs.pop('''question_encoder''' )
lowercase__: Any = question_encoder_config.pop('''model_type''' )
lowercase__: Tuple = kwargs.pop('''generator''' )
lowercase__: Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
lowercase__: Optional[int] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: Any = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: str = reduce_loss
lowercase__: str = label_smoothing
lowercase__: Dict = exclude_bos_score
lowercase__: Any = do_marginalize
lowercase__: Optional[int] = title_sep
lowercase__: Any = doc_sep
lowercase__: Any = n_docs
lowercase__: List[Any] = max_combined_length
lowercase__: int = dataset
lowercase__: int = dataset_split
lowercase__: str = index_name
lowercase__: Dict = retrieval_vector_size
lowercase__: Dict = retrieval_batch_size
lowercase__: List[str] = passages_path
lowercase__: str = index_path
lowercase__: Optional[Any] = use_dummy_dataset
lowercase__: str = output_retrieved
lowercase__: List[str] = do_deduplication
lowercase__: List[Any] = use_cache
if self.forced_eos_token_id is None:
lowercase__: int = getattr(self.generator , '''forced_eos_token_id''' , _UpperCAmelCase )
@classmethod
def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = copy.deepcopy(self.__dict__ )
lowercase__: str = self.question_encoder.to_dict()
lowercase__: str = self.generator.to_dict()
lowercase__: str = self.__class__.model_type
return output
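# Composition sketch (assumption: the class above de-mangles to
# transformers' RagConfig, and the classmethod above to its
# from_question_encoder_generator_configs):
#
#     from transformers import AutoConfig, RagConfig
#
#     q_cfg = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     g_cfg = AutoConfig.from_pretrained("facebook/bart-large")
#     rag_cfg = RagConfig.from_question_encoder_generator_configs(q_cfg, g_cfg, n_docs=5)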
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__A = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
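# Note on the pattern above (a sketch, not original code): at import time
# only _import_structure, a dict of plain strings, is built; _LazyModule
# resolves an attribute such as Speech2TextConfig on first access, so the
# heavy torch/tf/sentencepiece imports are deferred. The `if TYPE_CHECKING:`
# branch gives static type checkers the eager view, and each
# OptionalDependencyNotAvailable guard means a missing backend only fails
# when one of its symbols is actually requested.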
| 360 | """simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: Union[str, Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: Dict = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(os.path.isfile(_UpperCAmelCase ) )
# File is cached at the same place the second time.
lowercase__: Any = cached_file(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
# Using a specific revision to test the full commit hash.
lowercase__: Dict = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''9b8c223''' )
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
lowercase__: int = cached_file('''tiny-random-bert''' , _UpperCAmelCase )
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
lowercase__: List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''aaaa''' )
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Optional[Any] = cached_file(_UpperCAmelCase , '''conf''' )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: int = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''.no_exist''' , _UpperCAmelCase , '''conf''' ) ) )
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: List[str] = cached_file(_UpperCAmelCase , '''conf''' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: Union[str, Any] = mock.Mock()
lowercase__: str = 500
lowercase__: Union[str, Any] = {}
lowercase__: List[str] = HTTPError
lowercase__: int = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_UpperCAmelCase ) as mock_head:
lowercase__: Any = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
# This checks that we did call the fake head request
mock_head.assert_called()
def _snake_case ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
def _snake_case ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , _UpperCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase , revision='''ahaha''' )
lowercase__: Optional[Any] = get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase__: Optional[Any] = json.loads(open(_UpperCAmelCase , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__: Any = Path(_UpperCAmelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , '''a.txt''' ) , str(_UpperCAmelCase ) )
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , '''b.txt''' ) )
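# Cache-layout note (a sketch of the hub cache these assertions walk):
# under TRANSFORMERS_CACHE each repo lives in models--<namespace>--<name>/
# with blobs/ (content-addressed files), refs/ (refs/main holds the current
# commit hash) and snapshots/<commit>/ (per-revision trees of symlinks into
# blobs/). A .no_exist/<commit>/ entry caches negative lookups, which is why
# the cached_file(..., "conf") calls above can fail fast even with
# local_files_only=True.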
| 2 | 0 |
"""simple docstring"""
def decimal_to_binary( num ) -> str:
if isinstance(num , float ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if isinstance(num , str ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if num == 0:
return "0b0"
negative = False
if num < 0:
negative = True
num = -num
binary = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(e ) for e in binary )
return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
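# Worked examples (a sketch, not part of the original file):
#
#     decimal_to_binary(0)    # -> "0b0"
#     decimal_to_binary(8)    # -> "0b1000"  (insert(0, ...) builds MSB-first)
#     decimal_to_binary(-8)   # -> "-0b1000" (sign restored via the negative flag)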
| 361 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "beit"
def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Union[str, Any] = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: int = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: List[str] = initializer_range
lowercase__: Optional[int] = layer_norm_eps
lowercase__: int = image_size
lowercase__: Tuple = patch_size
lowercase__: int = num_channels
lowercase__: Optional[Any] = use_mask_token
lowercase__: List[Any] = use_absolute_position_embeddings
lowercase__: Optional[int] = use_relative_position_bias
lowercase__: Optional[int] = use_shared_relative_position_bias
lowercase__: Optional[Any] = layer_scale_init_value
lowercase__: Union[str, Any] = drop_path_rate
lowercase__: Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__: Tuple = out_indices
lowercase__: Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__: List[str] = use_auxiliary_head
lowercase__: Optional[Any] = auxiliary_loss_weight
lowercase__: str = auxiliary_channels
lowercase__: List[str] = auxiliary_num_convs
lowercase__: Tuple = auxiliary_concat_input
lowercase__: Dict = semantic_loss_ignore_index
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = version.parse("1.11" )
@property
def _snake_case ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
return 1e-4
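# Export note (a sketch; the exact transformers.onnx signature varies by
# version, so treat the call below as an assumption): the OnnxConfig above
# declares one fully dynamic pixel_values input and a 1e-4 validation
# tolerance. A hypothetical export would look like:
#
#     from pathlib import Path
#     from transformers.onnx import export
#
#     export(image_processor, model, onnx_config, 13, Path("beit.onnx"))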
| 2 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=100 , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , ):
lowercase__: int = parent
lowercase__: str = vocab_size
lowercase__: Tuple = batch_size
lowercase__: Tuple = image_size
lowercase__: List[str] = patch_size
lowercase__: List[Any] = num_channels
lowercase__: Dict = is_training
lowercase__: int = use_labels
lowercase__: Dict = hidden_size
lowercase__: Tuple = num_hidden_layers
lowercase__: int = num_attention_heads
lowercase__: Any = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: int = hidden_dropout_prob
lowercase__: Union[str, Any] = attention_probs_dropout_prob
lowercase__: int = type_sequence_label_size
lowercase__: Dict = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__: Union[str, Any] = (image_size // patch_size) ** 2
lowercase__: str = num_patches + 1
def _snake_case ( self ):
lowercase__: Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__: Dict = None
if self.use_labels:
lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: Union[str, Any] = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = FlaxBeitModel(config=_a )
lowercase__: List[Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = FlaxBeitForMaskedImageModeling(config=_a )
lowercase__: Union[str, Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = self.type_sequence_label_size
lowercase__: Optional[Any] = FlaxBeitForImageClassification(config=_a )
lowercase__: str = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__: List[Any] = 1
lowercase__: int = FlaxBeitForImageClassification(_a )
lowercase__: Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__: Tuple = model(_a )
def _snake_case ( self ):
lowercase__: List[Any] = self.prepare_config_and_inputs()
lowercase__, lowercase__, lowercase__: Any = config_and_inputs
lowercase__: Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :List[str] = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def _snake_case ( self ):
lowercase__: int = FlaxBeitModelTester(self )
lowercase__: List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__, lowercase__: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: str = model_class(_a )
lowercase__: Optional[int] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__: Tuple = [*signature.parameters.keys()]
lowercase__: List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def _snake_case ( self ):
lowercase__, lowercase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase__: List[str] = self._prepare_for_class(_a , _a )
lowercase__: Optional[Any] = model_class(_a )
@jax.jit
def model_jitted(_UpperCAmelCase , **_UpperCAmelCase ):
return model(pixel_values=_a , **_a )
with self.subTest('''JIT Enabled''' ):
lowercase__: str = model_jitted(**_a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowercase__: Union[str, Any] = model_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self ):
lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def _snake_case ( self ):
for model_class_name in self.all_model_classes:
lowercase__: int = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
lowercase__: str = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
lowercase__: str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@require_flax
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _snake_case ( self ):
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _snake_case ( self ):
lowercase__: Optional[Any] = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
lowercase__: Dict = self.default_image_processor
lowercase__: Tuple = prepare_img()
lowercase__: int = image_processor(images=_a , return_tensors='''np''' ).pixel_values
# prepare bool_masked_pos
lowercase__: str = np.ones((1, 196) , dtype=_a )
# forward pass
lowercase__: Optional[Any] = model(pixel_values=_a , bool_masked_pos=_a )
lowercase__: int = outputs.logits
# verify the logits
lowercase__: int = (1, 196, 8192)
self.assertEqual(logits.shape , _a )
lowercase__: List[str] = np.array(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , _a , atol=1e-2 ) )
@slow
def _snake_case ( self ):
lowercase__: Any = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
lowercase__: str = self.default_image_processor
lowercase__: List[Any] = prepare_img()
lowercase__: List[str] = image_processor(images=_a , return_tensors='''np''' )
# forward pass
lowercase__: Optional[Any] = model(**_a )
lowercase__: Optional[int] = outputs.logits
# verify the logits
lowercase__: Optional[Any] = (1, 1000)
self.assertEqual(logits.shape , _a )
lowercase__: Optional[int] = np.array([-1.2_385, -1.0_987, -1.0_108] )
self.assertTrue(np.allclose(logits[0, :3] , _a , atol=1e-4 ) )
lowercase__: Optional[Any] = 281
self.assertEqual(logits.argmax(-1 ).item() , _a )
@slow
def _snake_case ( self ):
lowercase__: Optional[int] = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
lowercase__: Dict = self.default_image_processor
lowercase__: Dict = prepare_img()
lowercase__: List[Any] = image_processor(images=_a , return_tensors='''np''' )
# forward pass
lowercase__: Any = model(**_a )
lowercase__: Union[str, Any] = outputs.logits
# verify the logits
lowercase__: Any = (1, 21841)
self.assertEqual(logits.shape , _a )
lowercase__: Union[str, Any] = np.array([1.6_881, -0.2_787, 0.5_901] )
self.assertTrue(np.allclose(logits[0, :3] , _a , atol=1e-4 ) )
lowercase__: int = 2396
self.assertEqual(logits.argmax(-1 ).item() , _a )
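# Pattern note (sketch): the JIT test above compiles the forward pass with
# @jax.jit, runs it once traced and once under jax.disable_jit(), and checks
# that the two output tuples match shape for shape. This is the standard way
# to surface tracing-only bugs such as Python side effects or
# data-dependent shapes.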
| 362 | """simple docstring"""
def join( separator , separated ) -> str:
joined = ''''''
for word_or_phrase in separated:
if not isinstance(word_or_phrase , str ):
raise Exception('''join() accepts only strings to be joined''' )
joined += word_or_phrase + separator
return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
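# Worked examples (a sketch, not part of the original file):
#
#     join("", ["a", "b", "c", "d"])           # -> "abcd"
#     join("#", ["a", "b", "c", "d"])          # -> "a#b#c#d"
#     join(" ", ["You", "are", "amazing!"])    # -> "You are amazing!"
#
# The separator appended on the last iteration is removed by the final
# .strip(separator).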
| 2 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__A = ['''gpt2''']
__A = '''gpt2'''
if is_tf_available():
class UpperCAmelCase (tf.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__()
lowercase__: Optional[Any] = tokenizer
lowercase__: Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowercase__: int = TFGPTaLMHeadModel.from_config(_SCREAMING_SNAKE_CASE )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE )
lowercase__: Optional[int] = tokenized["input_ids"].to_tensor()
lowercase__: Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowercase__: Dict = self.model(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )["logits"]
return outputs
@require_tf
@require_keras_nlp
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
super().setUp()
lowercase__: List[str] = [GPTaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
lowercase__: List[str] = [TFGPTaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowercase__: Tuple = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
lowercase__: List[str] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _snake_case ( self ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
lowercase__: Any = tokenizer([test_inputs] , return_tensors='''tf''' )
lowercase__: str = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
lowercase__: Any = python_outputs[key].numpy()
lowercase__: Optional[int] = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(_SCREAMING_SNAKE_CASE , tf.intaa ) == tf_outputs_values ) )
@slow
def _snake_case ( self ):
for tf_tokenizer in self.tf_tokenizers:
lowercase__: List[str] = tf.function(_SCREAMING_SNAKE_CASE )
for test_inputs in self.test_sentences:
lowercase__: str = tf.constant(_SCREAMING_SNAKE_CASE )
lowercase__: Tuple = compiled_tokenizer(_SCREAMING_SNAKE_CASE )
lowercase__: List[Any] = tf_tokenizer(_SCREAMING_SNAKE_CASE )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _snake_case ( self ):
for tf_tokenizer in self.tf_tokenizers:
lowercase__: Optional[int] = ModelToSave(tokenizer=_SCREAMING_SNAKE_CASE )
lowercase__: Any = tf.convert_to_tensor([self.test_sentences[0]] )
lowercase__: Optional[int] = model.serving(_SCREAMING_SNAKE_CASE ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowercase__: int = Path(_SCREAMING_SNAKE_CASE ) / "saved.model"
tf.saved_model.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , signatures={'''serving_default''': model.serving} )
lowercase__: int = tf.saved_model.load(_SCREAMING_SNAKE_CASE )
lowercase__: Tuple = loaded_model.signatures["serving_default"](_SCREAMING_SNAKE_CASE )["output_0"]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def _snake_case ( self ):
for tf_tokenizer in self.tf_tokenizers:
lowercase__: str = tf.convert_to_tensor([self.test_sentences[0]] )
lowercase__: Tuple = tf_tokenizer(_SCREAMING_SNAKE_CASE ) # Build model with some sample inputs
lowercase__: Optional[int] = tf_tokenizer.get_config()
lowercase__: List[Any] = TFGPTaTokenizer.from_config(_SCREAMING_SNAKE_CASE )
lowercase__: Dict = model_from_config(_SCREAMING_SNAKE_CASE )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def _snake_case ( self ):
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
lowercase__: int = 123123
for max_length in [3, 5, 1024]:
lowercase__: List[Any] = tf.convert_to_tensor([self.test_sentences[0]] )
lowercase__: List[str] = tf_tokenizer(_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
lowercase__: List[str] = out["input_ids"].numpy().shape[1]
assert out_length == max_length
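# Takeaway sketch (mirrors the saved-model test above): ModelToSave bakes
# the tokenizer into the TF graph, so the exported SavedModel accepts raw
# strings end to end:
#
#     model = ModelToSave(tokenizer=tf_tokenizer)
#     tf.saved_model.save(model, path, signatures={"serving_default": model.serving})
#     loaded = tf.saved_model.load(path)
#     loaded.signatures["serving_default"](tf.constant(["some text"]))["output_0"]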
| 363 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = StableDiffusionPanoramaPipeline
_UpperCAmelCase :List[str] = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase__: List[Any] = DDIMScheduler()
torch.manual_seed(0 )
lowercase__: Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__: Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__: List[str] = CLIPTextModel(_UpperCAmelCase )
lowercase__: int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__: int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
lowercase__: int = torch.manual_seed(_UpperCAmelCase )
lowercase__: List[Any] = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[str] = self.get_dummy_components()
lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = '''french fries'''
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
lowercase__: Optional[Any] = output.images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 )
lowercase__: List[str] = output.images
lowercase__: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: int = self.get_dummy_components()
lowercase__: List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[Any] = self.get_dummy_components()
lowercase__: Any = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase )
lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _UpperCAmelCase=0 ):
lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
lowercase__: int = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Any = '''stabilityai/stable-diffusion-2-base'''
lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase )
lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: List[str] = self.get_inputs()
lowercase__: Dict = pipe(**_UpperCAmelCase ).images
lowercase__: Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _snake_case ( self ):
lowercase__: int = 0
def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
lowercase__: List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__: Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Any = latents[0, -3:, -3:, -1]
lowercase__: List[Any] = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase__: Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Optional[Any] = latents[0, -3:, -3:, -1]
lowercase__: Any = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase__: int = False
lowercase__: str = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: List[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__: Any = self.get_inputs()
lowercase__: List[str] = pipe(**_UpperCAmelCase )
lowercase__: Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
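# Takeaway sketch: the last two tests exercise (a) the per-step callback
# API, where pipe(**inputs, callback=fn, callback_steps=1) invokes
# fn(step, timestep, latents) at every denoising step, useful for
# inspecting intermediate latents, and (b) enable_sequential_cpu_offload(),
# which moves submodules to the GPU one at a time and trades speed for the
# peak-VRAM bound checked via torch.cuda.max_memory_allocated().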
| 2 | 0 |
"""simple docstring"""
import torch
from transformers import AutoModel
class UpperCAmelCase (torch.nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase="sayef/fsner-bert-base-uncased" ):
super(UpperCAmelCase_ , self ).__init__()
lowercase__: int = AutoModel.from_pretrained(UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
lowercase__: Any = torch.nn.CosineSimilarity(3 , 1e-0_8 )
lowercase__: Tuple = torch.nn.Softmax(dim=1 )
def _snake_case ( self , **_UpperCAmelCase ):
return self.bert(**UpperCAmelCase_ ).last_hidden_state
def _snake_case ( self , _UpperCAmelCase ):
return token_embeddings.sum(2 , keepdim=UpperCAmelCase_ )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 ):
return self.softmax(T * self.cos(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = W_supports["sizes"].tolist()
lowercase__: Tuple = W_supports["start_token_id"].item()
lowercase__: str = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
lowercase__: List[Any] = self.BERT(**UpperCAmelCase_ )
lowercase__: List[Any] = self.BERT(**UpperCAmelCase_ )
lowercase__: List[str] = None
lowercase__: Dict = None
lowercase__: List[Any] = W_supports["input_ids"] == start_token_id
lowercase__: List[Any] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(UpperCAmelCase_ ):
if i == 0:
lowercase__: Dict = 0
else:
lowercase__: Tuple = support_sizes[i - 1]
lowercase__: Optional[Any] = S[s : s + size][start_token_masks[s : s + size]]
lowercase__: Optional[Any] = S[s : s + size][end_token_masks[s : s + size]]
lowercase__: Dict = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
lowercase__: Optional[Any] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
lowercase__: str = torch.vstack((p_starts, p_start) )
lowercase__: List[str] = torch.vstack((p_ends, p_end) )
else:
lowercase__: List[Any] = p_start
lowercase__: List[Any] = p_end
return p_starts, p_ends
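# Shape sketch (hedged; names follow the de-mangled FSNER code): q is
# [n_queries, q_len, hidden]; for query i the support encodings S are
# sliced to the rows whose input ids match the start/end marker tokens,
# giving s_start and s_end of shape [n_support_tokens, hidden]. Then
# torch.matmul(q[i], s_start.T).sum(1).softmax(0) is a distribution over
# the q_len query positions for span starts (p_starts), and symmetrically
# for span ends; rows are stacked across queries with torch.vstack.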
| 364 | """simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = DebertaVaTokenizer
_UpperCAmelCase :Tuple = DebertaVaTokenizerFast
_UpperCAmelCase :int = True
_UpperCAmelCase :int = True
def _snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = '''this is a test'''
lowercase__: int = '''this is a test'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: Optional[int] = '''<pad>'''
lowercase__: Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_UpperCAmelCase ) , 30001 )
def _snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case ( self ):
# fmt: off
lowercase__: int = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# fmt: off
lowercase__: Dict = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens_target = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(__A , do_lower_case=True , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(__A , do_lower_case=True , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
def _snake_case ( self ):
# fmt: off
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens_target = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(__A , do_lower_case=True , split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(__A , do_lower_case=True , split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
def _snake_case ( self ):
# fmt: off
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens_target = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
        # fmt: on
        tokenizer = DebertaVaTokenizer(__A , do_lower_case=False , split_by_punct=True )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(__A , do_lower_case=False , split_by_punct=True )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
def _snake_case ( self ):
# fmt: off
        sequence = ''' \tHeLLo!how  \n Are yoU?  '''
        tokens_target = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
        # fmt: on
        tokenizer = DebertaVaTokenizer(__A , do_lower_case=False , split_by_punct=False )
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , tokens_target )
        rust_tokenizer = DebertaVaTokenizerFast(__A , do_lower_case=False , split_by_punct=False )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(rust_tokens , tokens_target )
def _snake_case ( self ):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''I was born in 92000, and this is falsé.'''
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence , add_special_tokens=False ) )
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence , add_special_tokens=False ) )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def _snake_case ( self ):
        sequence = '''This is a test'''
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
        back_tokens_target = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
        tokenizer = DebertaVaTokenizer(__A , keep_accents=True )
        rust_tokenizer = DebertaVaTokenizerFast(__A , keep_accents=True )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(rust_ids , ids_target )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(rust_tokens , tokens_target )
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids )
        self.assertListEqual(rust_back_tokens , back_tokens_target )
        # fmt: off
        sequence = '''I was born in 92000, and this is falsé.'''
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
        back_tokens_target = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
        # fmt: on
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , ids_target )
        tokens = tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , tokens_target )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(back_tokens , back_tokens_target )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(rust_ids , ids_target )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(rust_tokens , tokens_target )
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids )
        self.assertListEqual(rust_back_tokens , back_tokens_target )
def _snake_case ( self ):
        tokenizer = DebertaVaTokenizer(__A )
        text = tokenizer.encode('''sequence builders''' )
        text_a = tokenizer.encode('''multi-sequence build''' )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
@slow
def _snake_case ( self ):
# fmt: off
lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2 | 0 |
"""simple docstring"""
def fibonacci( n ) -> int:
    # Return the n-th Fibonacci number; n == 1 or non-integer inputs yield 0.
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index( n ) -> int:
    # Find the index of the first Fibonacci number with at least n digits.
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution( n = 1_0_0_0 ) -> int:
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
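    # Worked check (illustrative): the sequence runs 0, 1, 1, 2, 3, 5, 8, 13,
    # 21, 34, 55, 89, 144, ..., so the first term with 3 digits is 144 at
    # index 12, hence fibonacci_digits_index(3) == 12.
    assert fibonacci(12 ) == 144
    assert fibonacci_digits_index(3 ) == 12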
| 365 | """simple docstring"""
import unittest
from transformers import DonutProcessor
__A = "naver-clova-ix/donut-base"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.processor = DonutProcessor.from_pretrained(__A )
    def test_tokenajson( self ):
        expected_json = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        sequence = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        actual_json = self.processor.tokenajson(sequence )
        self.assertDictEqual(actual_json , expected_json )
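    # For orientation (added note, not part of the original test): tokenajson
    # (token2json) walks the XML-like tags emitted by Donut decoding, so
    # <s_name>John Doe</s_name> maps to {"name": "John Doe"}, and repeated
    # groups separated by <sep/> collapse into a list, as the nicknames field
    # above shows.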
| 2 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCAmelCase (ImageProcessingSavingTestMixin ,unittest.TestCase ):
"""simple docstring"""
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
def _snake_case ( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _snake_case ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _snake_case ( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 366 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__A = logging.get_logger(__name__)
class UpperCAmelCase (VideoMAEImageProcessor ):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 2 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase (metaclass=DummyObject ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = ["torch", "torchsde"]
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(self , ['''torch''', '''torchsde'''] )
@classmethod
    def from_config( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['''torch''', '''torchsde'''] )
@classmethod
    def from_pretrained( cls , *_UpperCAmelCase , **_UpperCAmelCase ):
requires_backends(cls , ['''torch''', '''torchsde'''] )
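# A minimal sketch of what `requires_backends` does (hand-written illustration;
# the real helper also formats per-backend installation hints). The
# availability check below is a hypothetical stand-in:
#
#     def requires_backends(obj, backends):
#         name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
#         missing = [b for b in backends if not _backend_available(b)]  # hypothetical check
#         if missing:
#             raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")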
| 367 | """simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 2_5_6
class UpperCAmelCase (DiffusionPipeline ):
"""simple docstring"""
_UpperCAmelCase :int = ["melgan"]
    def __init__( self , notes_encoder , continuous_encoder , decoder , scheduler , melgan , ):
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5 )  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False ):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value )
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False ):
        min_out, max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out ) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode( self , input_tokens , continuous_inputs , continuous_mask ):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time ):
        timesteps = noise_time
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps )
        return logits
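    # Worked example for the two scaling helpers above (illustrative numbers,
    # using the values registered in __init__: min_value = log(1e-5) ~ -11.5129,
    # max_value = 4.0):
    #   scale_features(max_value, output_range=(-1.0, 1.0)) -> 1.0
    #   scale_features(min_value, output_range=(-1.0, 1.0)) -> -1.0
    # scale_to_features inverts the mapping, so applying it to the result
    # recovers the original (clipped) value.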
@torch.no_grad()
    def __call__( self , input_tokens , generator = None , num_inference_steps = 100 , return_dict = True , output_type = "numpy" , callback = None , callback_steps = 1 , ):
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(callback_steps )}.""" )
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32 )
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
        for i, encoder_input_tokens in enumerate(input_tokens ):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True )
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator ).prev_sample
            mel = self.scale_to_features(x , input_range=[-1.0, 1.0] )
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel )
            logger.info('''Generated segment''' , i )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output )
| 2 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name( class_name ):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_feature_extractor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
class UpperCAmelCase :
"""simple docstring"""
def __init__( self ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        config = kwargs.pop('''config''' , None )
        trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
        kwargs['''_from_auto'''] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
        feature_extractor_class = config_dict.get('''feature_extractor_type''' , None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config , '''feature_extractor_type''' , None )
            if hasattr(config , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class )
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config ) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('''code_revision''' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config ) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config )]
            return feature_extractor_class.from_dict(config_dict , **kwargs )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
    def register( config_class , feature_extractor_class ):
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
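# Typical usage of the class above (a sketch; the checkpoint name is only an
# illustrative example):
#
#     from transformers import AutoFeatureExtractor
#     feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#
# The checkpoint's `feature_extractor_type` (or its config's `model_type`,
# "wav2vec2" here) is resolved through the mapping above to
# Wav2Vec2FeatureExtractor.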
| 368 | """simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__A = logging.get_logger(__name__)
__A = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class UpperCAmelCase (PretrainedConfig ):
"""simple docstring"""
_UpperCAmelCase :str = "bloom"
_UpperCAmelCase :List[str] = ["past_key_values"]
_UpperCAmelCase :Optional[Any] = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
    def __init__( self , vocab_size=250880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class UpperCAmelCase (OnnxConfigWithPast ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse("1.12" )
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ):
# TODO: how to do that better?
lowercase__: Any = 0
@property
    def inputs( self ):
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' , inverted_values_shape=True )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
    def num_layers( self ):
return self._config.n_layer
@property
    def num_attention_heads( self ):
return self._config.n_head
@property
    def atol_for_validation( self ):
return 1e-3
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch, seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
    def default_onnx_opset( self ):
return 13
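# Shape convention used by `generate_dummy_inputs` above, restated for clarity:
# BLOOM fuses the batch and head dimensions in its past_key_values, and stores
# keys transposed relative to values:
#
#     past_key_shape   = (batch * num_heads, head_dim, past_seq_len)
#     past_value_shape = (batch * num_heads, past_seq_len, head_dim)
#
# which is why the `inputs` property fills the past with
# `inverted_values_shape=True`.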
| 2 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
__A = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
__A = {
"yjernite/retribert-base-uncased": 5_1_2,
}
__A = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class UpperCAmelCase (PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
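    # Illustrative layout (added note, not an executable test): for a sequence
    # pair A, B the two methods above produce
    #     tokens:         [CLS] A ... [SEP] B ... [SEP]
    #     token type ids:   0   0 ...   0   1 ...   1
    # and for a single sequence every token type id is 0.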
| 369 | """simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase (ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def _snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def _snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def _snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def _snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def _snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def _snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def _snake_case ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor( tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ):
        model = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(torch_device )
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
                    [-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
                    [2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
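        # Numeric illustration of the ratio-based check above (values made up):
        # with TOLERANCE = 1e-3, an expected value of 8.2e4 against an output of
        # 8.204e4 gives a ratio of ~0.9995, inside [1 - 1e-3, 1 + 1e-3], so it
        # passes, whereas an absolute comparison with the same tolerance would not.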
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
def encode( plain ) -> list[int]:
    # Map each lowercase letter to its position in the alphabet (a=1 ... z=26).
    return [ord(elem ) - 9_6 for elem in plain]
def decode( encoded ) -> str:
    # Inverse of encode: map alphabet positions back to lowercase letters.
    return "".join(chr(elem + 9_6 ) for elem in encoded )
def main() -> None:
    encoded = encode(input('''-> ''' ).strip().lower() )
    print('''Encoded: ''' , encoded )
    print('''Decoded:''' , decode(encoded ) )
if __name__ == "__main__":
    main()
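# Worked example: encode("hello") == [8, 5, 12, 12, 15], since ord("h") is 104
# and 104 - 96 = 8; decode([8, 5, 12, 12, 15]) == "hello" reverses the mapping.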
| 370 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase (PretrainedConfig ):
    """simple docstring"""
    model_type = "unispeech-sat"
    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1,
        layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1,
        num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, num_clusters=504, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 2 | 0 |
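The closing inputs_to_logits_ratio property of the UniSpeechSat config above multiplies the convolutional strides together, giving the feature extractor's total downsampling factor, i.e. how many raw audio samples map to one frame of hidden states. A quick standalone check using the default strides from the config:
import functools
import operator
conv_stride = (5, 2, 2, 2, 2, 2, 2)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 samples per output frame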
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
def _snake_case ( self ):
pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
'''This is a very simple sentence.''',
'''The quick brown fox jumps over the lazy dog.''',
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 371 | """simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 2 | 0 |
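The axial-position comment in the Reformer test above deserves a gloss: Reformer factors its position embeddings into a grid, so the product of config.axial_pos_shape must equal the (padded) sequence length, which is why the test overwrites it with the encoded input's shape. A toy illustration of the factorization in plain Python (the numbers are the pretrained defaults mentioned in the comment):
rows, cols = 512, 1024  # 512 * 1024 = 524288 addressable positions
position = 123456
# Position p is addressed by the pair (p // cols, p % cols), so two small
# embedding tables of sizes 512 and 1024 stand in for one table of size 524288.
print(divmod(position, cols))  # (120, 576)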
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"
# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF variable names to PyTorch parameters."""
    tf_to_pt_map = {}
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model
    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2
        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var
    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias
    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)
    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
"""simple docstring"""
    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        groups=1,
        bias=False,
        use_normalization=True,
        use_activation=True,
    ):
        super().__init__()
        self.config = config
        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)
        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )
        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None
        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None
    def forward(self, features):
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """simple docstring"""
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
__A = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__A = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
"""simple docstring"""
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                ))
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                ))
        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")
        hidden_states = self.conv_stem(pixel_values)
        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
"""simple docstring"""
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, labels=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
| 350 | """simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 2 | 0 |
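The apply_tf_padding helper in the MobileNetV1 row above reproduces TensorFlow's "SAME" convolution padding, which pads asymmetrically so the output spatial size is ceil(input / stride). A framework-free sketch of the per-axis arithmetic (the function name is ours, not the library's):
import math
def same_padding_1d(in_size: int, kernel: int, stride: int) -> tuple[int, int]:
    # Total padding needed so that output = ceil(in_size / stride).
    if in_size % stride == 0:
        total = max(kernel - stride, 0)
    else:
        total = max(kernel - (in_size % stride), 0)
    before = total // 2
    return before, total - before  # TF puts the extra pixel after, matching the code above
pad = same_padding_1d(224, kernel=3, stride=2)
print(pad)  # (0, 1)
assert (224 + sum(pad) - 3) // 2 + 1 == math.ceil(224 / 2)  # output size == ceil(in / stride)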
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})
        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})
    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})
    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})
    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})
    def test_evaluate_if(self):
        code = "if x <= 3:\n    y = 2\nelse:\n    y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})
        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})
    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})
    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})
        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})
    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n    x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
| 351 | """simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=50400, n_positions=2048, n_ctx=2048, n_embd=4096, n_layer=28, n_head=16,
        rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0,
        embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1e-5, initializer_range=0.02,
        use_cache=True, bos_token_id=50256, eos_token_id=50256, tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class CodeGenOnnxConfig(OnnxConfigWithPast):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ):
# TODO: how to do that better?
lowercase__: Any = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer
    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
| 2 | 0 |
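The interpreter tests in the row above pin down evaluate's contract: it executes a restricted subset of Python, threads the state dict through assignments, and returns the value of the last statement. A deliberately tiny look-alike (ours, not the library's implementation; Python 3.9+ for ast.unparse; it handles only simple assignments and bare expressions):
import ast
def tiny_evaluate(code: str, tools: dict, state: dict):
    result = None
    env = {**tools, **state}
    for node in ast.parse(code).body:
        if isinstance(node, ast.Assign):
            value = eval(ast.unparse(node.value), {"__builtins__": {}}, env)
            name = node.targets[0].id
            state[name] = env[name] = value
            result = value
        elif isinstance(node, ast.Expr):
            result = eval(ast.unparse(node.value), {"__builtins__": {}}, env)
    return result
state = {}
print(tiny_evaluate("x = 3\ny = add_two(x)", {"add_two": lambda n: n + 2}, state))  # 5
print(state)  # {'x': 3, 'y': 5}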
"""simple docstring"""
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a
def fx_derivative(x: float) -> float:
    return 2 * x
def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
| 352 | """simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    """simple docstring"""
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"})
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"})
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer
    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 2 | 0 |
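Quadratic convergence is what makes the Newton iteration above practical: each step roughly doubles the number of correct digits. A five-step trace for a = 2 starting at x = 2 shows it:
a, x = 2.0, 2.0
for _ in range(5):
    x = x - (x * x - a) / (2 * x)  # one Newton step for f(x) = x**2 - a
    print(f"{x:.16f}")
# 1.5000000000000000, 1.4166666666666667, 1.4142156862745099,
# 1.4142135623746899, 1.4142135623730951 (machine-precision sqrt(2))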
"""simple docstring"""
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at next set bit (next 1) instead of looping
        # through each bit and checking for 1s hence the
        # loop won't run 32 times it will only run the number of `1` times
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16,
        resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02,
        use_cache=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 2 | 0 |
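The number &= number - 1 step in the set-bit counter above clears exactly the lowest set bit, so the loop body runs once per 1-bit rather than once per bit position. A short trace makes the mechanics visible:
n = 0b101100
while n:
    print(bin(n))
    n &= n - 1  # subtracting 1 flips the lowest 1 and the zeros below it; the AND erases it
# prints 0b101100, 0b101000, 0b100000 -- three iterations for three set bits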
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    """simple docstring"""
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])
    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 354 | """simple docstring"""
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 2 | 0 |
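The tiling row above counts ways to fill a 1 x n strip with unit black squares and coloured tiles of length 2, 3, or 4. The total count (including the all-black row) obeys a tetranacci-style recurrence, a handy cross-check on the table-based bookkeeping in the snippet; this is the standard Project Euler 117 formulation, while the snippet's per-tile-length sums slice the same table differently:
from functools import lru_cache
@lru_cache(maxsize=None)
def ways(n: int) -> int:
    # First cell is a black square (n-1 cells left) or starts a tile of length 2, 3, or 4.
    if n < 0:
        return 0
    if n == 0:
        return 1
    return ways(n - 1) + ways(n - 2) + ways(n - 3) + ways(n - 4)
print(ways(5))  # 15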
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30,
        max_resolution=400, do_resize=True, size=None, do_normalize=True,
        image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""
    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase__: Optional[Any] = image_processing(_UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
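# A hedged standalone sketch of what these tests exercise. The real public
# class is VivitImageProcessor; the 8-frame random video and the constructor
# kwargs below are illustrative assumptions, not taken from this file.
#
# from transformers import VivitImageProcessor
# import numpy as np
# processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
# video = [np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8) for _ in range(8)]
# pixel_values = processor(video, return_tensors="pt").pixel_values
# pixel_values.shape  # -> (1, 8, 3, 18, 18): (batch, frames, channels, height, width)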
| 355 | """simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.2 , _UpperCAmelCase=0.2 ):
lowercase__: int = bp_numa
lowercase__: Union[str, Any] = bp_numa
lowercase__: List[str] = bp_numa
lowercase__: str = conva_get[:2]
lowercase__: Union[str, Any] = conva_get[2]
lowercase__: Any = size_pa
lowercase__: Optional[Any] = rate_w
lowercase__: Tuple = rate_t
lowercase__: List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase__: Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__: str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__: Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1
lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1
def _snake_case ( self , _UpperCAmelCase ):
# save model dict with pickle
lowercase__: int = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_UpperCAmelCase , '''wb''' ) as f:
pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"""Model saved: {save_path}""" )
@classmethod
def _snake_case ( cls , _UpperCAmelCase ):
# read saved model
with open(_UpperCAmelCase , '''rb''' ) as f:
lowercase__: Optional[int] = pickle.load(_UpperCAmelCase ) # noqa: S301
lowercase__: Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
lowercase__: Any = model_dic.get('''size_pooling1''' )
lowercase__: int = model_dic.get('''num_bp1''' )
lowercase__: Optional[int] = model_dic.get('''num_bp2''' )
lowercase__: str = model_dic.get('''num_bp3''' )
lowercase__: Any = model_dic.get('''rate_weight''' )
lowercase__: Union[str, Any] = model_dic.get('''rate_thre''' )
# create model instance
lowercase__: str = CNN(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# modify model parameter
lowercase__: Dict = model_dic.get('''w_conv1''' )
lowercase__: Dict = model_dic.get('''wkj''' )
lowercase__: str = model_dic.get('''vji''' )
lowercase__: List[Any] = model_dic.get('''thre_conv1''' )
lowercase__: Optional[int] = model_dic.get('''thre_bp2''' )
lowercase__: Tuple = model_dic.get('''thre_bp3''' )
return conv_ins
def _snake_case ( self , _UpperCAmelCase ):
return 1 / (1 + np.exp(-1 * x ))
def _snake_case ( self , _UpperCAmelCase ):
return round(_UpperCAmelCase , 3 )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# convolution process
lowercase__: Any = convs[0]
lowercase__: Tuple = convs[1]
lowercase__: List[Any] = np.shape(_UpperCAmelCase )[0]
# get the data slice of original image data, data_focus
lowercase__: List[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
lowercase__: Tuple = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_UpperCAmelCase )
        # calculate the feature map of every single kernel, saved as a list of matrices
lowercase__: Optional[int] = []
lowercase__: Optional[int] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_UpperCAmelCase ):
lowercase__: str = []
for i_focus in range(len(_UpperCAmelCase ) ):
lowercase__: Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_UpperCAmelCase ) )
lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(
_UpperCAmelCase , _UpperCAmelCase )
data_featuremap.append(_UpperCAmelCase )
        # expanding the data slice to one dimension
lowercase__: Union[str, Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) )
lowercase__: Any = np.asarray(_UpperCAmelCase )
return focus_list, data_featuremap
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="average_pool" ):
# pooling process
lowercase__: List[Any] = len(featuremaps[0] )
lowercase__: Any = int(size_map / size_pooling )
lowercase__: List[Any] = []
for i_map in range(len(_UpperCAmelCase ) ):
lowercase__: Any = featuremaps[i_map]
lowercase__: Tuple = []
for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_UpperCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_UpperCAmelCase ) )
lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase )
featuremap_pooled.append(_UpperCAmelCase )
return featuremap_pooled
def _snake_case ( self , _UpperCAmelCase ):
        # expanding three-dimensional data to a one-dimensional list
lowercase__: Optional[Any] = []
for i in range(len(_UpperCAmelCase ) ):
lowercase__: Any = np.shape(data[i] )
lowercase__: List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
lowercase__: List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(_UpperCAmelCase )
lowercase__: List[str] = np.asarray(_UpperCAmelCase )
return data_expanded
def _snake_case ( self , _UpperCAmelCase ):
        # expanding a matrix to a one-dimensional list
lowercase__: Union[str, Any] = np.asarray(_UpperCAmelCase )
lowercase__: List[str] = np.shape(_UpperCAmelCase )
lowercase__: List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = []
lowercase__: List[str] = 0
for i_map in range(_UpperCAmelCase ):
lowercase__: Union[str, Any] = np.ones((size_map, size_map) )
for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = pd_pool[
i_pool
]
lowercase__: List[Any] = i_pool + 1
lowercase__: str = np.multiply(
_UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_UpperCAmelCase )
return pd_all
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=bool ):
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_UpperCAmelCase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_UpperCAmelCase )) )
lowercase__: Tuple = 0
lowercase__: Tuple = []
lowercase__: Optional[int] = 10000
while rp < n_repeat and mse >= error_accuracy:
lowercase__: Tuple = 0
print(F"""-------------Learning Time {rp}--------------""" )
for p in range(len(_UpperCAmelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase__: List[Any] = np.asmatrix(datas_train[p] )
lowercase__: Optional[int] = np.asarray(datas_teach[p] )
lowercase__, lowercase__: List[str] = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: Optional[int] = self.pooling(_UpperCAmelCase , self.size_poolinga )
lowercase__: int = np.shape(_UpperCAmelCase )
lowercase__: Optional[Any] = self._expand(_UpperCAmelCase )
lowercase__: Any = data_bp_input
lowercase__: Any = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa
lowercase__: str = self.sig(_UpperCAmelCase )
lowercase__: Optional[Any] = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa
lowercase__: Dict = self.sig(_UpperCAmelCase )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
lowercase__: str = np.multiply(
(data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
lowercase__: str = np.multiply(
np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
lowercase__: Dict = np.dot(_UpperCAmelCase , self.vji )
lowercase__: Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase__: List[str] = pd_conva_pooled.T.getA().tolist()
lowercase__: Optional[Any] = self._calculate_gradient_from_pool(
_UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase__: str = self._expand_mat(pd_conva_all[k_conv] )
lowercase__: str = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase__: List[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layer
lowercase__: Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase__: List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase__: List[str] = self.thre_bpa - pd_k_all * self.rate_thre
lowercase__: Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the error of each single image
lowercase__: Optional[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase__: str = rp + 1
lowercase__: Optional[Any] = error_count / patterns
all_mse.append(_UpperCAmelCase )
def draw_error():
lowercase__: Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_UpperCAmelCase , '''+-''' )
plt.plot(_UpperCAmelCase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_UpperCAmelCase , alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def _snake_case ( self , _UpperCAmelCase ):
        # model prediction
lowercase__: Union[str, Any] = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_UpperCAmelCase )) )
for p in range(len(_UpperCAmelCase ) ):
lowercase__: Union[str, Any] = np.asmatrix(datas_test[p] )
lowercase__, lowercase__: Any = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: List[str] = self.pooling(_UpperCAmelCase , self.size_poolinga )
lowercase__: str = self._expand(_UpperCAmelCase )
lowercase__: List[Any] = data_bp_input
lowercase__: List[str] = bp_outa * self.vji.T - self.thre_bpa
lowercase__: Any = self.sig(_UpperCAmelCase )
lowercase__: Optional[int] = bp_outa * self.wkj.T - self.thre_bpa
lowercase__: Any = self.sig(_UpperCAmelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase__: str = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out]
return np.asarray(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
        # return the image data after the convolution process so we can inspect it
lowercase__: int = np.asmatrix(_UpperCAmelCase )
lowercase__, lowercase__: Optional[int] = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: List[Any] = self.pooling(_UpperCAmelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
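    # Hedged usage sketch. Every method in this listing is named `_snake_case`,
    # so the class cannot be driven as written; the calls below assume the
    # presumed original method names (convolution, trained, save_model), an
    # assumed constructor argument order, and a hypothetical 20x20 grayscale
    # image with a 10-way target vector.
    #
    # cnn = CNN([5, 4, 1], 2, 400, 20, 10)  # conv kernel spec, pooling size, BP layer sizes
    # image = np.random.rand(20, 20)
    # target = np.zeros(10)
    # feature_maps, pooled = cnn.convolution(image)
    # cnn.trained(1, [image], [target], n_repeat=10, error_accuracy=0.1, draw_e=False)
    # cnn.save_model("model.pkl")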
| 2 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[str]:
lowercase__: Optional[int] = {}
lowercase__: List[str] = job['''started_at''']
lowercase__: Optional[int] = job['''completed_at''']
lowercase__: List[Any] = date_parser.parse(__UpperCAmelCase )
lowercase__: int = date_parser.parse(__UpperCAmelCase )
lowercase__: Dict = round((end_datetime - start_datetime).total_seconds() / 6_0.0 )
lowercase__: List[str] = start
lowercase__: List[str] = end
lowercase__: List[str] = duration_in_min
return job_info
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Any:
lowercase__: Dict = None
if token is not None:
lowercase__: Dict = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"""Bearer {token}"""}
lowercase__: Any = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowercase__: Tuple = requests.get(__UpperCAmelCase , headers=__UpperCAmelCase ).json()
lowercase__: Optional[int] = {}
try:
job_time.update({job['''name''']: extract_time_from_single_job(__UpperCAmelCase ) for job in result['''jobs''']} )
lowercase__: Tuple = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(__UpperCAmelCase ):
lowercase__: List[Any] = requests.get(url + F"""&page={i + 2}""" , headers=__UpperCAmelCase ).json()
job_time.update({job['''name''']: extract_time_from_single_job(__UpperCAmelCase ) for job in result['''jobs''']} )
return job_time
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
__A = parser.parse_args()
__A = get_job_time(args.workflow_run_id)
__A = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v['duration']}''')
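# Example invocation (script name and run id are placeholders; the optional
# second argument of the job-time function above accepts a GitHub token if
# anonymous API calls get rate-limited):
#   python get_ci_job_time.py --workflow_run_id 2372573546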
| 356 | """simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = CTRLTokenizer
_UpperCAmelCase :Any = False
_UpperCAmelCase :List[Any] = False
def _snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''}
lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_UpperCAmelCase ) )
def _snake_case ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Optional[int] = '''adapt react readapt apt'''
lowercase__: Optional[int] = '''adapt react readapt apt'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__: Optional[int] = '''adapt react readapt apt'''
lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = tokens + [tokenizer.unk_token]
lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
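# A minimal, hypothetical re-implementation of the greedy BPE merging encoded
# by the toy merges above (not the actual CTRLTokenizer code path; real BPE
# re-picks the best-ranked pair after every merge, which happens to coincide
# with this sequential sweep for this tiny vocabulary).
def _toy_bpe(word, merges):
    # split into characters, marking the last one as word-final
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for pair in merges:
        first, second = pair.split()
        i = 0
        while i < len(symbols) - 1:
            if symbols[i] == first and symbols[i + 1] == second:
                symbols[i : i + 2] = [first + second]
            else:
                i += 1
    return symbols
# _toy_bpe("readapt", ["a p", "ap t</w>", "r e", "a d", "ad apt</w>"])
# -> ['re', 'adapt</w>'], i.e. the "re@@ adapt" pieces asserted above.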
| 2 | 0 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Optional[int]:
if attention_mask is None:
lowercase__: Optional[int] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowercase__: List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowercase__: Optional[Any] = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=A__ )
if decoder_head_mask is None:
lowercase__: Optional[Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=A__ )
if cross_attn_head_mask is None:
lowercase__: str = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=A__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="relu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ):
lowercase__: Optional[Any] = parent
lowercase__: Optional[Any] = batch_size
lowercase__: Optional[int] = seq_length
lowercase__: Union[str, Any] = is_training
lowercase__: Optional[int] = use_labels
lowercase__: List[str] = vocab_size
lowercase__: int = hidden_size
lowercase__: Tuple = num_hidden_layers
lowercase__: Tuple = num_attention_heads
lowercase__: Optional[int] = intermediate_size
lowercase__: List[Any] = hidden_act
lowercase__: str = hidden_dropout_prob
lowercase__: Any = attention_probs_dropout_prob
lowercase__: Tuple = encoder_layerdrop
lowercase__: str = decoder_layerdrop
lowercase__: Dict = max_position_embeddings
lowercase__: Any = eos_token_id
lowercase__: Tuple = pad_token_id
lowercase__: Union[str, Any] = bos_token_id
def _snake_case ( self ):
lowercase__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: str = self.eos_token_id # Eos Token
lowercase__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowercase__: List[str] = input_ids.clamp(self.pad_token_id + 1 )
lowercase__: int = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowercase__: Dict = self.get_config()
lowercase__: Any = prepare_mam_aaa_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
return config, inputs_dict
def _snake_case ( self ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _snake_case ( self ):
lowercase__, lowercase__: Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[int] = MaMaaaModel(config=lowercase__ ).get_decoder().to(lowercase__ ).eval()
lowercase__: Optional[Any] = inputs_dict['''input_ids''']
lowercase__: Optional[int] = inputs_dict['''attention_mask''']
lowercase__: Union[str, Any] = inputs_dict['''head_mask''']
# first forward pass
lowercase__: Dict = model(lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__ , use_cache=lowercase__ )
lowercase__, lowercase__: Any = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
lowercase__: List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__: List[Any] = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention_mask
lowercase__: List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__: Tuple = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowercase__: str = model(lowercase__ , attention_mask=lowercase__ )['''last_hidden_state''']
lowercase__: Optional[int] = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[
'''last_hidden_state'''
]
# select random slice
lowercase__: Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__: Any = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__: Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-2 ) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MaMaaaModel(config=lowercase__ ).to(lowercase__ ).eval()
lowercase__: Tuple = model(**lowercase__ )
lowercase__: Dict = outputs.encoder_last_hidden_state
lowercase__: Dict = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: int = model.get_encoder()
encoder.save_pretrained(lowercase__ )
lowercase__: List[str] = MaMaaaEncoder.from_pretrained(lowercase__ ).to(lowercase__ )
lowercase__: int = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: str = model.get_decoder()
decoder.save_pretrained(lowercase__ )
lowercase__: str = MaMaaaDecoder.from_pretrained(lowercase__ ).to(lowercase__ )
lowercase__: List[str] = decoder(
input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=lowercase__ , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class UpperCAmelCase (lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Union[str, Any] = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
_UpperCAmelCase :Optional[int] = (
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :int = True
_UpperCAmelCase :List[Any] = True
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :Dict = False
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _snake_case ( self ):
lowercase__: Optional[Any] = MaMaaaModelTester(self )
lowercase__: str = ConfigTester(self , config_class=lowercase__ )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__, lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowercase__: Optional[Any] = model_class(lowercase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase__ )
lowercase__, lowercase__: Union[str, Any] = model_class.from_pretrained(lowercase__ , output_loading_info=lowercase__ )
self.assertEqual(info['''missing_keys'''] , [] )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowercase__ )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowercase__ )
def _snake_case ( self ):
lowercase__, lowercase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
lowercase__: Any = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
lowercase__: List[str] = copy.deepcopy(self._prepare_for_class(lowercase__ , lowercase__ ) )
if not self.is_encoder_decoder:
lowercase__: Optional[int] = inputs['''input_ids''']
del inputs["input_ids"]
else:
lowercase__: Optional[Any] = inputs['''input_ids''']
lowercase__: Dict = inputs.get('''decoder_input_ids''' , lowercase__ )
del inputs["input_ids"]
inputs.pop('''decoder_input_ids''' , lowercase__ )
lowercase__: List[str] = model.get_input_embeddings()
if not self.is_encoder_decoder:
lowercase__: Dict = wte(lowercase__ )
else:
lowercase__: Tuple = wte(lowercase__ )
lowercase__: Union[str, Any] = wte(lowercase__ )
with torch.no_grad():
model(**lowercase__ )[0]
def _snake_case ( self ):
lowercase__, lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
lowercase__: Optional[int] = input_dict['''input_ids''']
lowercase__: Any = input_ids.ne(1 ).to(lowercase__ )
lowercase__: Dict = MaMaaaForConditionalGeneration(lowercase__ ).eval().to(lowercase__ )
if torch_device == "cuda":
model.half()
model.generate(lowercase__ , attention_mask=lowercase__ )
model.generate(num_beams=4 , do_sample=lowercase__ , early_stopping=lowercase__ , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]:
return torch.tensor(A__ , dtype=torch.long , device=A__ )
__A = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _snake_case ( self ):
return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' )
def _snake_case ( self ):
lowercase__: Union[str, Any] = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(lowercase__ )
lowercase__: Tuple = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
lowercase__: List[str] = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
lowercase__: str = prepare_mam_aaa_inputs_dict(model.config , lowercase__ , lowercase__ )
with torch.no_grad():
lowercase__: int = model(**lowercase__ )[0]
lowercase__: int = torch.Size((1, 11, 1024) )
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
lowercase__: List[Any] = torch.tensor(
[[-0.7_780, -0.1_676, 0.1_038], [-6.7_556, -1.3_992, 0.0_567], [-7.5_383, -0.5_920, -0.2_779]] , device=lowercase__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=lowercase__ ) )
def _snake_case ( self ):
lowercase__: int = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(lowercase__ )
# change to intended input
lowercase__: List[Any] = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]] )
lowercase__: Optional[int] = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]] )
lowercase__: Any = prepare_mam_aaa_inputs_dict(model.config , lowercase__ , lowercase__ )
with torch.no_grad():
lowercase__: Tuple = model(**lowercase__ )[0]
lowercase__: Any = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , lowercase__ )
# change to expected output here
lowercase__: int = torch.tensor(
[[-1.0_448, -1.0_411, 3.7_992], [-3.2_191, -3.2_386, -1.3_451], [-3.6_210, -3.5_993, 0.4_925]] , device=lowercase__ )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=lowercase__ ) )
def _snake_case ( self ):
lowercase__: Tuple = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(lowercase__ )
lowercase__: str = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''' )
lowercase__: Optional[Any] = [
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
lowercase__: Optional[int] = tokenizer(lowercase__ , padding=lowercase__ , return_tensors='''pt''' )
lowercase__: Optional[Any] = model.generate(
input_ids=dct['''input_ids'''].to(lowercase__ ) , attention_mask=dct['''attention_mask'''].to(lowercase__ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''' ) , )
lowercase__: Any = [
'''The NSA case highlights the total absence of intelligence debate''',
'''I think there are two levels of response from the French government.''',
'''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
''' communications in France.''',
]
lowercase__: Optional[Any] = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=lowercase__ , skip_special_tokens=lowercase__ )
assert generated == expected_en
| 357 | """simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
__A = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def _snake_case ( _UpperCAmelCase ):
lowercase__: int = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_UpperCAmelCase )
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase ):
lowercase__: List[str] = get_logger('''datasets-cli/converting''' )
lowercase__: Optional[Any] = tfds_path
lowercase__: Dict = datasets_directory
def _snake_case ( self ):
if os.path.isdir(self._tfds_path ):
lowercase__: Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase__: Optional[int] = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
lowercase__: int = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
lowercase__: Tuple = []
lowercase__: Dict = []
lowercase__: Any = {}
if os.path.isdir(self._tfds_path ):
lowercase__: Dict = os.listdir(_UpperCAmelCase )
else:
lowercase__: Dict = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
lowercase__: Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isfile(_UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_UpperCAmelCase , encoding='''utf-8''' ) as f:
lowercase__: Tuple = f.readlines()
lowercase__: Optional[Any] = []
lowercase__: Dict = False
lowercase__: List[str] = False
lowercase__: List[Any] = []
for line in lines:
lowercase__: List[str] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase__: Optional[int] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
lowercase__: Dict = ''''''
continue
elif "from absl import logging" in out_line:
lowercase__: Tuple = '''from datasets import logging\n'''
elif "getLogger" in out_line:
lowercase__: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase__: Any = True
lowercase__: str = list(filter(lambda _UpperCAmelCase : e in out_line , _UpperCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_UpperCAmelCase ) + '''\n''' )
out_lines.append(_UpperCAmelCase )
out_lines.append(_UpperCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase__: List[Any] = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase__: Any = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _UpperCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
lowercase__: List[str] = '''from . import ''' + match.group(1 )
                # Check we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase__: Optional[Any] = True
out_lines.append(_UpperCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase__: Dict = f_name.replace('''.py''' , '''''' )
lowercase__: Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_UpperCAmelCase )
if needs_manual_update:
with_manual_update.append(_UpperCAmelCase )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(_UpperCAmelCase )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
lowercase__: str = os.path.basename(_UpperCAmelCase )
lowercase__: Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(_UpperCAmelCase , _UpperCAmelCase )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 2 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=2 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=36 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=6 , _UpperCAmelCase=6 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=1000 , ):
lowercase__: Optional[Any] = parent
lowercase__: str = batch_size
lowercase__: Optional[int] = num_channels
lowercase__: str = image_size
lowercase__: Optional[int] = patch_size
lowercase__: Any = text_seq_length
lowercase__: List[str] = is_training
lowercase__: Optional[int] = use_input_mask
lowercase__: Any = use_token_type_ids
lowercase__: int = use_labels
lowercase__: List[Any] = vocab_size
lowercase__: Tuple = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: List[str] = num_attention_heads
lowercase__: Optional[int] = intermediate_size
lowercase__: str = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: int = attention_probs_dropout_prob
lowercase__: List[Any] = max_position_embeddings
lowercase__: Any = type_vocab_size
lowercase__: List[str] = type_sequence_label_size
lowercase__: Optional[Any] = initializer_range
lowercase__: Optional[Any] = coordinate_size
lowercase__: List[str] = shape_size
lowercase__: Optional[int] = num_labels
lowercase__: List[str] = num_choices
lowercase__: List[str] = scope
lowercase__: Union[str, Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowercase__: Tuple = text_seq_length
lowercase__: Union[str, Any] = (image_size // patch_size) ** 2 + 1
lowercase__: Dict = self.text_seq_length + self.image_seq_length
def _snake_case ( self ):
lowercase__: Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowercase__: Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase__: Tuple = bbox[i, j, 3]
lowercase__: Optional[Any] = bbox[i, j, 1]
lowercase__: List[str] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase__: str = bbox[i, j, 2]
lowercase__: Optional[int] = bbox[i, j, 0]
lowercase__: str = t
lowercase__: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__: List[Any] = None
if self.use_input_mask:
lowercase__: Optional[int] = random_attention_mask([self.batch_size, self.text_seq_length] )
lowercase__: str = None
if self.use_token_type_ids:
lowercase__: str = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowercase__: List[str] = None
lowercase__: List[str] = None
if self.use_labels:
lowercase__: str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowercase__: Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = LayoutLMvaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
# text + image
lowercase__: Dict = model(_UpperCAmelCase , pixel_values=_UpperCAmelCase )
lowercase__: Tuple = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: Optional[Any] = model(_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: Tuple = model(_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowercase__: List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowercase__: Any = model(pixel_values=_UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = self.num_labels
lowercase__: Tuple = LayoutLMvaForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Union[str, Any] = self.num_labels
lowercase__: str = LayoutLMvaForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[Any] = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = LayoutLMvaForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Tuple = model(
_UpperCAmelCase , bbox=_UpperCAmelCase , pixel_values=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.prepare_config_and_inputs()
        lowercase__: int = config_and_inputs
lowercase__: int = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
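# Input-structure sketch for the dict returned above (shapes follow the tester
# defaults; these five keys are exactly what the LayoutLMv3 forward expects):
#   input_ids:      (batch_size, text_seq_length)
#   bbox:           (batch_size, text_seq_length, 4)   # word bounding boxes
#   pixel_values:   (batch_size, num_channels, image_size, image_size)
#   token_type_ids: (batch_size, text_seq_length)
#   attention_mask: (batch_size, text_seq_length)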
@require_torch
class UpperCAmelCase (SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :Union[str, Any] = False
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :Union[str, Any] = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Tuple = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def _snake_case ( self ):
lowercase__: Optional[int] = LayoutLMvaModelTester(self )
lowercase__: Union[str, Any] = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
lowercase__: Tuple = copy.deepcopy(_UpperCAmelCase )
if model_class in get_values(_UpperCAmelCase ):
lowercase__: Dict = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(_UpperCAmelCase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
lowercase__: str = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
elif model_class in get_values(_UpperCAmelCase ):
lowercase__: str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
lowercase__: Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
elif model_class in [
*get_values(_UpperCAmelCase ),
]:
lowercase__: Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
elif model_class in [
*get_values(_UpperCAmelCase ),
]:
lowercase__: Optional[int] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_UpperCAmelCase , )
return inputs_dict
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__: int = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
@slow
def _snake_case ( self ):
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__: List[Any] = LayoutLMvaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
lowercase__: List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _snake_case ( self ):
return LayoutLMvaImageProcessor(apply_ocr=_UpperCAmelCase ) if is_vision_available() else None
@slow
def _snake_case ( self ):
lowercase__: Optional[Any] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(_UpperCAmelCase )
lowercase__: Optional[Any] = self.default_image_processor
lowercase__: List[Any] = prepare_img()
lowercase__: Tuple = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).pixel_values.to(_UpperCAmelCase )
lowercase__: str = torch.tensor([[1, 2]] )
lowercase__: List[str] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowercase__: Optional[int] = model(
input_ids=input_ids.to(_UpperCAmelCase ) , bbox=bbox.to(_UpperCAmelCase ) , pixel_values=pixel_values.to(_UpperCAmelCase ) , )
# verify the logits
lowercase__: Tuple = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , _UpperCAmelCase )
lowercase__: int = torch.tensor(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
| 358 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = "cvt"
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Dict = num_channels
lowercase__: str = patch_sizes
lowercase__: Optional[Any] = patch_stride
lowercase__: List[str] = patch_padding
lowercase__: Optional[Any] = embed_dim
lowercase__: Optional[int] = num_heads
lowercase__: Any = depth
lowercase__: str = mlp_ratio
lowercase__: Any = attention_drop_rate
lowercase__: Any = drop_rate
lowercase__: Optional[Any] = drop_path_rate
lowercase__: Dict = qkv_bias
lowercase__: Dict = cls_token
lowercase__: Any = qkv_projection_method
lowercase__: List[str] = kernel_qkv
lowercase__: Union[str, Any] = padding_kv
lowercase__: Optional[int] = stride_kv
lowercase__: int = padding_q
lowercase__: Dict = stride_q
lowercase__: Any = initializer_range
lowercase__: Union[str, Any] = layer_norm_eps
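# Minimal instantiation sketch. The public names are CvtConfig/CvtModel, and
# pairing them this way is the standard transformers pattern (assumed here):
# from transformers import CvtConfig, CvtModel
# configuration = CvtConfig()      # the defaults above mirror microsoft/cvt-13
# model = CvtModel(configuration)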
| 2 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class UpperCAmelCase (_lowercase ):
"""simple docstring"""
_UpperCAmelCase :Dict = "gpt_bigcode"
_UpperCAmelCase :Optional[int] = ["past_key_values"]
_UpperCAmelCase :Tuple = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=50257 , _UpperCAmelCase=1024 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_pytorch_tanh" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ):
lowercase__: Any = vocab_size
lowercase__: Dict = n_positions
lowercase__: str = n_embd
lowercase__: Optional[int] = n_layer
lowercase__: List[Any] = n_head
lowercase__: Dict = n_inner
lowercase__: Any = activation_function
lowercase__: Optional[Any] = resid_pdrop
lowercase__: Optional[int] = embd_pdrop
lowercase__: Dict = attn_pdrop
lowercase__: Union[str, Any] = layer_norm_epsilon
lowercase__: List[str] = initializer_range
lowercase__: int = scale_attn_weights
lowercase__: int = use_cache
lowercase__: Tuple = attention_softmax_in_fpaa
lowercase__: Dict = scale_attention_softmax_in_fpaa
lowercase__: Dict = multi_query
lowercase__: List[Any] = bos_token_id
lowercase__: Dict = eos_token_id
super().__init__(bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase )
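# Minimal instantiation sketch. The public names are GPTBigCodeConfig and
# GPTBigCodeModel; `multi_query=True` above selects the SantaCoder-style
# multi-query attention:
# from transformers import GPTBigCodeConfig, GPTBigCodeModel
# config = GPTBigCodeConfig()
# model = GPTBigCodeModel(config)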
| 359 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__A )
class UpperCAmelCase (PretrainedConfig ):
    """simple docstring"""

    model_type = "rag"
    is_composition = True
    def __init__( self , vocab_size=None , is_encoder_decoder=True , prefix=None , bos_token_id=None , pad_token_id=None , eos_token_id=None , decoder_start_token_id=None , title_sep=" / " , doc_sep=" // " , n_docs=5 , max_combined_length=300 , retrieval_vector_size=768 , retrieval_batch_size=8 , dataset="wiki_dpr" , dataset_split="train" , index_name="compressed" , index_path=None , passages_path=None , use_dummy_dataset=False , reduce_loss=False , label_smoothing=0.0 , do_deduplication=True , exclude_bos_score=False , do_marginalize=False , output_retrieved=False , use_cache=True , forced_eos_token_id=None , **kwargs , ):
        super().__init__(
            bos_token_id=bos_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , is_encoder_decoder=is_encoder_decoder , prefix=prefix , vocab_size=vocab_size , **kwargs , )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop('''question_encoder''' )
        question_encoder_model_type = question_encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''generator''' )
        decoder_model_type = decoder_config.pop('''model_type''' )

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type , **question_encoder_config )
        self.generator = AutoConfig.for_model(decoder_model_type , **decoder_config )

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator , '''forced_eos_token_id''' , None )
    @classmethod
    def from_question_encoder_generator_configs( cls , question_encoder_config , generator_config , **kwargs ):
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['''question_encoder'''] = self.question_encoder.to_dict()
        output['''generator'''] = self.generator.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
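# Usage sketch (illustrative, not part of the original file): composing this
# config from two sub-model configs via the classmethod above. Upstream the
# class is `RagConfig`; the checkpoint names below are assumptions chosen for
# the example, not taken from this module.
#
#   from transformers import AutoConfig
#
#   question_encoder_config = AutoConfig.from_pretrained(
#       "facebook/dpr-question_encoder-single-nq-base"
#   )
#   generator_config = AutoConfig.from_pretrained("facebook/bart-large")
#   rag_config = UpperCAmelCase.from_question_encoder_generator_configs(
#       question_encoder_config, generator_config, n_docs=5
#   )
#   assert rag_config.to_dict()["model_type"] == "rag"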
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
"""simple docstring"""
    def __init__( self , parent , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs )

        result = model(input_ids )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs , encoder_hidden_states=encoder_hidden_states )

        # Also check the case where encoder outputs are not passed
        result = model(input_ids , attention_mask=input_mask )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
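# Illustrative driver (an assumption, not part of the test suite): the tester
# above can be exercised by hand with any object exposing unittest assertions.
#
#   import unittest
#
#   case = unittest.TestCase()
#   tester = TFEsmModelTester(case)
#   config, input_ids, input_mask, *_ = tester.prepare_config_and_inputs()
#   tester.create_and_check_model(config, input_ids, input_mask, None, None, None)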
@require_tf
class UpperCAmelCase (TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFEsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_resize_token_embeddings( self ):
        pass

    @unittest.skip('''Protein models do not support embedding resizing.''' )
    def test_save_load_after_resize_token_embeddings( self ):
        pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_inference_masked_lm( self ):
        model = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
    @slow
    def test_inference_no_head( self ):
        model = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ] )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
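# End-to-end usage sketch for the checkpoint exercised above (hedged: the
# tokenizer call follows the standard transformers API; the protein sequence is
# an arbitrary example, not taken from the tests):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   inputs = tokenizer("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ", return_tensors="tf")
#   logits = model(**inputs).logits  # shape: (1, sequence_length, vocab_size)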
| 360 | """simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
    def test_cached_file( self ):
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR , subfolder ) ) )
        with open(os.path.join(CACHE_DIR , '''refs''' , '''main''' ) ) as f:
            main_commit = f.read()
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , '''snapshots''' , main_commit , CONFIG_NAME ) )
        self.assertTrue(os.path.isfile(archive_file ) )

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        self.assertEqual(archive_file , new_archive_file )

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME , revision='''9b8c223''' )
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , '''snapshots''' , FULL_COMMIT_HASH , CONFIG_NAME ) )
    def test_cached_file_errors( self ):
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid model identifier''' ):
            _ = cached_file('''tiny-random-bert''' , CONFIG_NAME )

        with self.assertRaisesRegex(EnvironmentError , '''is not a valid git identifier''' ):
            _ = cached_file(RANDOM_BERT , CONFIG_NAME , revision='''aaaa''' )

        with self.assertRaisesRegex(EnvironmentError , '''does not appear to have a file named''' ):
            _ = cached_file(RANDOM_BERT , '''conf''' )
    def test_non_existence_is_cached( self ):
        with self.assertRaisesRegex(EnvironmentError , '''does not appear to have a file named''' ):
            _ = cached_file(RANDOM_BERT , '''conf''' )

        with open(os.path.join(CACHE_DIR , '''refs''' , '''main''' ) ) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR , '''.no_exist''' , main_commit , '''conf''' ) ) )

        path = cached_file(RANDOM_BERT , '''conf''' , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )

        path = cached_file(RANDOM_BERT , '''conf''' , local_files_only=True , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=response_mock ) as mock_head:
            path = cached_file(RANDOM_BERT , '''conf''' , _raise_exceptions_for_connection_errors=False )
            self.assertIsNone(path )
        # This check we did call the fake head request
        mock_head.assert_called()
    def test_has_file( self ):
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , FLAX_WEIGHTS_NAME ) )
    def test_get_file_from_repo_distant( self ):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , CONFIG_NAME )

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , CONFIG_NAME , revision='''ahaha''' )

        resolved_file = get_file_from_repo('''bert-base-cased''' , CONFIG_NAME )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 768 )
    def test_get_file_from_repo_local( self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir ) / '''a.txt'''
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir , '''a.txt''' ) , str(filename ) )

            self.assertIsNone(get_file_from_repo(tmp_dir , '''b.txt''' ) )
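# Quick reference for the APIs under test (usage sketch; the repo id is the
# same tiny test model used above):
#
#   from transformers.utils import cached_file, get_file_from_repo
#
#   config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
#   # get_file_from_repo is the lenient variant: it returns None instead of
#   # raising when the file is absent.
#   maybe_path = get_file_from_repo("hf-internal-testing/tiny-random-bert", "missing.txt")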
| 2 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=10 , num_channels=3 , min_size=32 * 8 , max_size=32 * 8 , num_labels=4 , hidden_dim=64 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
def _snake_case ( self ):
lowercase__: List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
lowercase__: Dict = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_a )
lowercase__: Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_a ) > 0.5
).float()
lowercase__: str = (torch.rand((self.batch_size, self.num_labels) , device=_a ) > 0.5).long()
lowercase__: Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _snake_case ( self ):
lowercase__: List[Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowercase__: Dict = self.num_queries
lowercase__: Tuple = self.num_labels
lowercase__: Dict = [1, 1, 1, 1]
lowercase__: Optional[int] = self.num_channels
lowercase__: List[str] = 64
lowercase__: Tuple = 128
lowercase__: List[str] = self.hidden_dim
lowercase__: List[str] = self.hidden_dim
lowercase__: int = self.hidden_dim
return config
def _snake_case ( self ):
lowercase__: Union[str, Any] = self.prepare_config_and_inputs()
lowercase__: Union[str, Any] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[Any] = output.encoder_hidden_states
lowercase__: Tuple = output.pixel_decoder_hidden_states
lowercase__: Union[str, Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) , config.decoder_layers )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
with torch.no_grad():
lowercase__: Optional[Any] = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
lowercase__: Optional[Any] = model(pixel_values=_a , pixel_mask=_a )
lowercase__: Any = model(_a , output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a , _a )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase__: Optional[int] = model(pixel_values=_a , pixel_mask=_a )
lowercase__: List[Any] = model(_a )
comm_check_on_output(_a )
lowercase__: Optional[Any] = model(
pixel_values=_a , pixel_mask=_a , mask_labels=_a , class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class UpperCAmelCase (ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    def setUp( self ):
        self.model_tester = MaskaFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskaFormerConfig , has_text_modality=False )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def _snake_case ( self ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def _snake_case ( self ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def _snake_case ( self ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def _snake_case ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _snake_case ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: str = model_class(_a )
lowercase__: Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__: str = [*signature.parameters.keys()]
lowercase__: Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
@slow
def _snake_case ( self ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowercase__: Any = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _snake_case ( self ):
lowercase__: Dict = (self.model_tester.min_size,) * 2
lowercase__: Tuple = {
"""pixel_values""": torch.randn((2, 3, *size) , device=_a ),
"""mask_labels""": torch.randn((2, 10, *size) , device=_a ),
"""class_labels""": torch.zeros(2 , 10 , device=_a ).long(),
}
lowercase__: List[str] = self.model_tester.get_config()
lowercase__: Dict = MaskaFormerForUniversalSegmentation(_a ).to(_a )
lowercase__: Optional[int] = model(**_a )
self.assertTrue(outputs.loss is not None )
def _snake_case ( self ):
lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a , **_a , output_hidden_states=_a )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__: Union[str, Any] = model_class(_a ).to(_a )
lowercase__: int = model(**_a , output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def _snake_case ( self ):
if not self.model_tester.is_training:
return
lowercase__: Dict = self.all_model_classes[1]
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
lowercase__: int = model_class(_a )
model.to(_a )
model.train()
lowercase__: str = model(_a , mask_labels=_a , class_labels=_a ).loss
loss.backward()
def _snake_case ( self ):
lowercase__: Tuple = self.all_model_classes[1]
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
lowercase__: Tuple = True
lowercase__: Union[str, Any] = True
lowercase__: Optional[int] = model_class(_a ).to(_a )
model.train()
lowercase__: Tuple = model(_a , mask_labels=_a , class_labels=_a )
lowercase__: Any = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase__: int = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowercase__: Tuple = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase__: Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1E-4
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@slow
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _snake_case ( self ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _snake_case ( self ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _snake_case ( self ):
lowercase__: Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
lowercase__: Optional[Any] = self.default_image_processor
lowercase__: Union[str, Any] = prepare_img()
lowercase__: List[Any] = image_processor(_a , return_tensors='''pt''' ).to(_a )
lowercase__: int = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 384, 384) )
with torch.no_grad():
lowercase__: str = model(**_a )
lowercase__: int = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
lowercase__: int = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _a , atol=_a ) )
lowercase__: int = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _a , atol=_a ) )
def _snake_case ( self ):
lowercase__: Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
lowercase__: List[Any] = self.default_image_processor
lowercase__: Any = prepare_img()
lowercase__: Any = image_processor(_a , return_tensors='''pt''' ).to(_a )
lowercase__: List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a , (1, 3, 384, 384) )
with torch.no_grad():
lowercase__: Optional[Any] = model(**_a )
# masks_queries_logits
lowercase__: Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowercase__: int = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
lowercase__: Tuple = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _a , atol=_a ) )
# class_queries_logits
lowercase__: int = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowercase__: List[Any] = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _a , atol=_a ) )
    def test_with_segmentation_maps_and_loss( self ):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] , return_tensors='''pt''' , )

        pixel_values = inputs['''pixel_values'''].to(torch_device )
        mask_labels = [el.to(torch_device ) for el in inputs['''mask_labels''']]
        class_labels = [el.to(torch_device ) for el in inputs['''class_labels''']]

        with torch.no_grad():
            outputs = model(pixel_values=pixel_values , mask_labels=mask_labels , class_labels=class_labels )

        self.assertTrue(outputs.loss is not None )
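# Usage sketch for the checkpoint exercised above (hedged: the post-processing
# call mirrors the standard transformers image-processor API; variable names
# are illustrative, not from this file):
#
#   checkpoint = "facebook/mask2former-swin-small-coco-instance"
#   processor = MaskaFormerImageProcessor.from_pretrained(checkpoint)
#   model = MaskaFormerForUniversalSegmentation.from_pretrained(checkpoint)
#   image = prepare_img()
#   inputs = processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   result = processor.post_process_instance_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )[0]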
| 361 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCAmelCase (PretrainedConfig ):
    """simple docstring"""

    model_type = "beit"
    def __init__( self , vocab_size=8192 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class UpperCAmelCase (OnnxConfig ):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation( self ):
        return 1e-4
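# Usage sketch (hedged: mirrors the generic `transformers.onnx` export flow
# rather than anything defined in this module; the checkpoint comes from the
# URL map above):
#
#   from transformers import AutoModel
#
#   model = AutoModel.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
#   onnx_config = UpperCAmelCase(model.config)  # the OnnxConfig subclass above
#   print(onnx_config.inputs)               # OrderedDict with the pixel_values axes
#   print(onnx_config.atol_for_validation)  # 1e-4, from the property above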
| 2 | 0 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel (pl.LightningModule ):
"""simple docstring"""
    def __init__( self , model ):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )

    # implement only because lightning requires to do so
    def forward( self ):
        pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ):
    # load initial model
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )

    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('''cpu''' ) )
    lightning_model.load_state_dict(ckpt['''state_dict'''] )

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )

    print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
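# Example invocation (hedged: the script filename and local paths are
# placeholders, not taken from this file):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/epoch=4.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa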
| 362 | """simple docstring"""
def SCREAMING_SNAKE_CASE__ ( separator , separated ) -> str:
    joined = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('''join() accepts only strings to be joined''' )
        joined += word_or_phrase + separator

    return joined.strip(separator )
if __name__ == "__main__":
from doctest import testmod
testmod()
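# Behaviour sketch (illustrative examples, not the module's own doctests):
#
#   >>> SCREAMING_SNAKE_CASE__("", ["a", "b", "c", "d"])
#   'abcd'
#   >>> SCREAMING_SNAKE_CASE__("#", ["a", "b", "c", "d"])
#   'a#b#c#d'
#   >>> SCREAMING_SNAKE_CASE__(" ", ["You", "are", "amazing!"])
#   'You are amazing!'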
| 2 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCAmelCase (PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCAmelCase :List[Any] = False
@property
def _snake_case ( self ):
return 32
@property
def _snake_case ( self ):
return 32
@property
def _snake_case ( self ):
return self.time_input_dim * 4
@property
def _snake_case ( self ):
return 8
@property
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Optional[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
lowercase__: List[Any] = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def _snake_case ( self ):
lowercase__: Union[str, Any] = CLIPImageProcessor(
crop_size=224 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
@property
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Any = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
lowercase__: int = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Optional[int] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
lowercase__: Any = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def _snake_case ( self ):
lowercase__: Dict = self.dummy_prior
lowercase__: Dict = self.dummy_image_encoder
lowercase__: Tuple = self.dummy_image_processor
lowercase__: Any = self.dummy_renderer
lowercase__: Tuple = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
lowercase__: Tuple = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
lowercase__: List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
lowercase__: Tuple = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
lowercase__: str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
lowercase__: Dict = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def _snake_case ( self ):
lowercase__: int = "cpu"
lowercase__: Tuple = self.get_dummy_components()
lowercase__: int = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
lowercase__: Union[str, Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowercase__: List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
lowercase__: List[str] = output.images[0]
lowercase__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowercase__: int = np.array(
[
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
lowercase__: int = torch_device == "cpu"
lowercase__: List[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def _snake_case ( self ):
lowercase__: List[Any] = self.get_dummy_components()
lowercase__: Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
lowercase__: Tuple = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowercase__: str = 1
lowercase__: Optional[int] = 2
lowercase__: str = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
lowercase__: Dict = batch_size * [inputs[key]]
lowercase__: Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
lowercase__: Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
lowercase__: Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
lowercase__: List[str] = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
lowercase__: Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowercase__: int = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
lowercase__: Any = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
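# Usage sketch for the slow test above (hedged: follows the documented Shap-E
# image-to-image flow; upstream the pipeline class is `ShapEImg2ImgPipeline`,
# and `export_to_gif`/`load_image` come from diffusers.utils):
#
#   import torch
#   from diffusers import ShapEImg2ImgPipeline
#   from diffusers.utils import export_to_gif, load_image
#
#   pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
#   pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
#   image = load_image(
#       "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
#   )
#   frames = pipe(image, guidance_scale=3.0, frame_size=64, output_type="pil").images[0]
#   export_to_gif(frames, "corgi_3d.gif")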
| 363 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase (PipelineLatentTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler()
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        generator = torch.manual_seed(seed )
        inputs = {
            '''prompt''': '''a photo of the dolomites''',
            '''generator''': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            '''height''': None,
            '''width''': None,
            '''num_inference_steps''': 1,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[str] = self.get_dummy_components()
lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = '''french fries'''
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
lowercase__: Optional[Any] = output.images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 )
lowercase__: List[str] = output.images
lowercase__: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: int = self.get_dummy_components()
lowercase__: List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[Any] = self.get_dummy_components()
lowercase__: Any = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase )
lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , seed=0 ):
        generator = torch.manual_seed(seed )
        inputs = {
            '''prompt''': '''a photo of the dolomites''',
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
def _snake_case ( self ):
lowercase__: Any = '''stabilityai/stable-diffusion-2-base'''
lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase )
lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: List[str] = self.get_inputs()
lowercase__: Dict = pipe(**_UpperCAmelCase ).images
lowercase__: Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state( self ):
        number_of_steps = 0

        def callback_fn(step , timestep , latents ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = '''stabilityai/stable-diffusion-2-base'''
        scheduler = DDIMScheduler.from_pretrained(model_ckpt , subfolder='''scheduler''' )
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt , scheduler=scheduler , safety_checker=None )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs , callback=callback_fn , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
def _snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: List[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__: Any = self.get_inputs()
lowercase__: List[str] = pipe(**_UpperCAmelCase )
lowercase__: Optional[int] = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
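# Usage sketch for the pipeline exercised above (hedged: follows the documented
# MultiDiffusion panorama flow; the output filename is a placeholder):
#
#   import torch
#   from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline
#
#   model_id = "stabilityai/stable-diffusion-2-base"
#   scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
#   pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_id, scheduler=scheduler)
#   pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
#   image = pipe("a photo of the dolomites", height=512, width=2048).images[0]
#   image.save("dolomites_panorama.png")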
| 2 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase (TokenizerTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def _snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: str = """lower newer"""
lowercase__: Optional[Any] = """lower newer"""
return input_text, output_text
def _snake_case ( self ):
lowercase__: int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__: int = """lower newer"""
lowercase__: int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowercase__: List[str] = tokenizer.tokenize(lowerCamelCase_ ) # , add_prefix_space=True)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
lowercase__: Union[str, Any] = tokens + [tokenizer.unk_token]
lowercase__: Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowerCamelCase_ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowerCamelCase_ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _snake_case ( self ):
lowercase__: List[Any] = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
lowercase__: Dict = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase_ )
lowercase__: str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase_ )
lowercase__: Optional[Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
lowercase__: List[str] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
lowercase__: str = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _snake_case ( self ):
lowercase__: int = self.get_tokenizer()
lowercase__: List[str] = """Encode this sequence."""
lowercase__: Optional[int] = tokenizer.byte_encoder[""" """.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowercase__: Dict = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
lowercase__: Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ )
lowercase__: Any = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowercase__: str = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
lowercase__: Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ )
# Testing spaces after special tokens
lowercase__: List[str] = """<mask>"""
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )} ) # mask token has a left space
lowercase__: Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
lowercase__: int = """Encode <mask> sequence"""
lowercase__: Optional[int] = """Encode <mask>sequence"""
lowercase__: Dict = tokenizer.encode(lowerCamelCase_ )
lowercase__: Optional[Any] = encoded.index(lowerCamelCase_ )
lowercase__: str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
lowercase__: Union[str, Any] = tokenizer.encode(lowerCamelCase_ )
lowercase__: Union[str, Any] = encoded.index(lowerCamelCase_ )
lowercase__: str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ )
def _snake_case ( self ):
pass
def _snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__: Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowercase__: str = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
lowercase__: List[str] = """A, <mask> AllenNLP sentence."""
lowercase__: List[str] = tokenizer_r.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ )
lowercase__: List[Any] = tokenizer_p.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowercase__: List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowercase__: Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowerCamelCase_ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _snake_case ( self ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase__: int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowercase__: Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase__: Tuple = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowerCamelCase_ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowerCamelCase_ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowerCamelCase_ )
def _snake_case ( self ):
# Verify that the returned offsets adapt correctly to the `add_prefix_space`
# and `trim_offsets` arguments
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__: int = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase__: List[str] = F"""{text_of_1_token} {text_of_1_token}"""
lowercase__: Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowercase__: Optional[int] = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase_ ) + 1, len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowercase__: Optional[int] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowercase__: str = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase_ ) + 1, len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowercase__: str = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowercase__: List[str] = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase_ ), len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowercase__: Any = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowercase__: Optional[Any] = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase_ ), len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowercase__: List[Any] = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase__: List[Any] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowercase__: Any = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase_ ) + 1, 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowercase__: str = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowercase__: Any = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase_ ), 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
lowercase__: int = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
lowercase__: Any = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase_ ), 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , )
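# Dependency-free sketch of the `trim_offsets` semantics asserted above; this is
# an illustration of the behavior, not the tokenizers implementation. With
# trim_offsets=True the leading space is excluded from the second token's
# character span; with trim_offsets=False it is included.
def offsets_for_two_tokens(word: str, trim_offsets: bool) -> tuple:
    first = (0, len(word))
    second_start = len(word) + 1 if trim_offsets else len(word)
    second = (second_start, len(word) + 1 + len(word))
    return first, second

assert offsets_for_two_tokens("hello", trim_offsets=True) == ((0, 5), (6, 11))
assert offsets_for_two_tokens("hello", trim_offsets=False) == ((0, 5), (5, 11))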
| 364 | """simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = DebertaVaTokenizer
_UpperCAmelCase :Tuple = DebertaVaTokenizerFast
_UpperCAmelCase :int = True
_UpperCAmelCase :int = True
def _snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = '''this is a test'''
lowercase__: int = '''this is a test'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: Optional[int] = '''<pad>'''
lowercase__: Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_UpperCAmelCase ) , 30001 )
def _snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case ( self ):
# fmt: off
lowercase__: int = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# fmt: off
lowercase__: Dict = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Any = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.'''
lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.get_tokenizer()
lowercase__: List[Any] = self.get_rust_tokenizer()
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.get_rust_tokenizer()
lowercase__: str = tokenizer.encode(_UpperCAmelCase )
lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''This is a test'''
lowercase__: str = [13, 1, 4398, 25, 21, 1289]
lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# fmt: off
lowercase__: str = '''I was born in 92000, and this is falsé.'''
lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase )
lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' )
lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' )
lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , )
@slow
def _snake_case ( self ):
# fmt: off
lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
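# Minimal sketch of the [CLS]/[SEP] layout checked by the build_inputs test above
# for BERT-style special tokens (as DeBERTa-v2 uses): a single sequence becomes
# [CLS] A [SEP], a pair becomes [CLS] A [SEP] B [SEP]. The ids below are
# illustrative, not real vocabulary ids.
def build_inputs_with_special_tokens(cls_id, sep_id, ids_a, ids_b=None):
    out = [cls_id] + ids_a + [sep_id]
    if ids_b is not None:
        out = out + ids_b + [sep_id]
    return out

assert build_inputs_with_special_tokens(1, 2, [7, 8]) == [1, 7, 8, 2]
assert build_inputs_with_special_tokens(1, 2, [7, 8], [9]) == [1, 7, 8, 2, 9, 2]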
| 2 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = """Wav2Vec2FeatureExtractor"""
_UpperCAmelCase :Tuple = """AutoTokenizer"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__(_lowercase , _lowercase )
lowercase__: Dict = self.feature_extractor
lowercase__: Optional[Any] = False
@classmethod
def _snake_case ( cls , _UpperCAmelCase , **_UpperCAmelCase ):
try:
return super().from_pretrained(_lowercase , **_lowercase )
except OSError:
warnings.warn(
F"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , _lowercase , )
lowercase__: Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(_lowercase , **_lowercase )
lowercase__: List[Any] = WavaVecaCTCTokenizer.from_pretrained(_lowercase , **_lowercase )
return cls(feature_extractor=_lowercase , tokenizer=_lowercase )
def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
lowercase__: Tuple = kwargs.pop('''raw_speech''' )
else:
lowercase__: str = kwargs.pop('''audio''' , _lowercase )
lowercase__: int = kwargs.pop('''sampling_rate''' , _lowercase )
lowercase__: str = kwargs.pop('''text''' , _lowercase )
if len(_lowercase ) > 0:
lowercase__: int = args[0]
lowercase__: int = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
lowercase__: Any = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
if text is not None:
lowercase__: Optional[int] = self.tokenizer(_lowercase , **_lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase__: List[Any] = encodings['''input_ids''']
return inputs
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_lowercase , **_lowercase )
lowercase__: Optional[Any] = kwargs.pop('''input_features''' , _lowercase )
lowercase__: Optional[Any] = kwargs.pop('''labels''' , _lowercase )
if len(_lowercase ) > 0:
lowercase__: Tuple = args[0]
lowercase__: List[str] = args[1:]
if input_features is not None:
lowercase__: List[str] = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
if labels is not None:
lowercase__: Dict = self.tokenizer.pad(_lowercase , **_lowercase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowercase__: Tuple = labels['''input_ids''']
return input_features
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
@contextmanager
def _snake_case ( self ):
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
lowercase__: str = True
lowercase__: Optional[Any] = self.tokenizer
yield
lowercase__: Optional[int] = self.feature_extractor
lowercase__: Union[str, Any] = False
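# Hedged usage sketch of the combined __call__ above: `audio` is routed to the
# feature extractor and `text` to the tokenizer, with the tokenized ids attached
# as "labels". The checkpoint name and random waveform are illustrative only.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.random.randn(16000).astype(np.float32)  # one second of fake 16 kHz audio
batch = processor(audio=speech, text="HELLO WORLD", sampling_rate=16000)
# batch["input_values"] comes from the feature extractor;
# batch["labels"] holds the tokenized transcript ids.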
| 365 | """simple docstring"""
import unittest
from transformers import DonutProcessor
__A = "naver-clova-ix/donut-base"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: int = DonutProcessor.from_pretrained(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
lowercase__: Union[str, Any] = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
lowercase__: str = self.processor.tokenajson(_UpperCAmelCase )
self.assertDictEqual(_UpperCAmelCase , _UpperCAmelCase )
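# Simplified, self-contained sketch of the tag-to-JSON conversion exercised above.
# The real DonutProcessor.token2json handles more cases; this regex version only
# covers plain fields, nested objects, and <sep/>-separated lists, which is
# enough to reproduce the shape of the expected_json fixture.
import re

def tokens_to_json(seq: str) -> dict:
    out = {}
    for key, body in re.findall(r"<s_(.+?)>(.*?)</s_\1>", seq, re.DOTALL):
        if "<sep/>" in body:  # list of sub-objects
            out[key] = [tokens_to_json(part) for part in body.split("<sep/>")]
        elif body.startswith("<s_"):  # single nested object
            out[key] = tokens_to_json(body)
        else:  # plain string field
            out[key] = body
    return out

seq = (
    "<s_name>John Doe</s_name><s_nicknames><s_nickname>Johnny</s_nickname>"
    "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
assert tokens_to_json(seq) == {
    "name": "John Doe",
    "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}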
| 2 | 0 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1_6_0_0_0 ) -> Optional[Any]:
lowercase__: Dict = int(round(sample_rate * max_length ) )
if len(SCREAMING_SNAKE_CASE__ ) <= sample_length:
return wav
lowercase__: Dict = randint(0 , len(SCREAMING_SNAKE_CASE__ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
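# Worked example: with sample_rate=16_000 and max_length=1.5, sample_length is
# int(round(16_000 * 1.5)) = 24_000 samples; a 30_000-sample clip keeps a random
# 24_000-sample window, while any clip of 24_000 samples or fewer is returned unchanged.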
@dataclass
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :Optional[str] = field(default=lowerCamelCase_ ,metadata={"help": "Name of a dataset from the datasets package"} )
_UpperCAmelCase :Optional[str] = field(
default=lowerCamelCase_ ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
_UpperCAmelCase :Optional[str] = field(
default=lowerCamelCase_ ,metadata={"help": "A file containing the training audio paths and labels."} )
_UpperCAmelCase :Optional[str] = field(
default=lowerCamelCase_ ,metadata={"help": "A file containing the validation audio paths and labels."} )
_UpperCAmelCase :str = field(
default="train" ,metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} ,)
_UpperCAmelCase :str = field(
default="validation" ,metadata={
"help": (
"The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
)
} ,)
_UpperCAmelCase :str = field(
default="audio" ,metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} ,)
_UpperCAmelCase :str = field(
default="label" ,metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} )
_UpperCAmelCase :Optional[int] = field(
default=lowerCamelCase_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
_UpperCAmelCase :Optional[int] = field(
default=lowerCamelCase_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
_UpperCAmelCase :float = field(
default=20 ,metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} ,)
@dataclass
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :str = field(
default="facebook/wav2vec2-base" ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ,)
_UpperCAmelCase :Optional[str] = field(
default=lowerCamelCase_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCAmelCase :Optional[str] = field(
default=lowerCamelCase_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} )
_UpperCAmelCase :str = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
_UpperCAmelCase :Optional[str] = field(
default=lowerCamelCase_ ,metadata={"help": "Name or path of preprocessor config."} )
_UpperCAmelCase :bool = field(
default=lowerCamelCase_ ,metadata={"help": "Whether to freeze the feature encoder layers of the model."} )
_UpperCAmelCase :bool = field(
default=lowerCamelCase_ ,metadata={"help": "Whether to generate an attention mask in the feature extractor."} )
_UpperCAmelCase :bool = field(
default=lowerCamelCase_ ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
_UpperCAmelCase :Optional[bool] = field(
default=lowerCamelCase_ ,metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
_UpperCAmelCase :bool = field(
default=lowerCamelCase_ ,metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} ,)
def _snake_case ( self ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder` '''
'''instead. Setting `freeze_feature_encoder==True`.''' , _UpperCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
lowercase__: Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__, lowercase__, lowercase__: List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__, lowercase__, lowercase__: int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__: Optional[int] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase__: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__: List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
lowercase__: Optional[int] = DatasetDict()
lowercase__: str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__: List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. """
'''Make sure to set `--label_column_name` to the correct label column - one of '''
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase__: str = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowercase__: Any = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowercase__: Optional[Any] = feature_extractor.model_input_names[0]
def train_transforms(__UpperCAmelCase ):
lowercase__: Any = []
for audio in batch[data_args.audio_column_name]:
lowercase__: Optional[int] = random_subsample(
audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(SCREAMING_SNAKE_CASE__ )
lowercase__: Optional[Any] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate )
lowercase__: Tuple = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )}
lowercase__: List[Any] = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__UpperCAmelCase ):
lowercase__: Tuple = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
lowercase__: int = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=feature_extractor.sampling_rate )
lowercase__: Optional[int] = {model_input_name: inputs.get(SCREAMING_SNAKE_CASE__ )}
lowercase__: Optional[int] = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase__: Any = raw_datasets['''train'''].features[data_args.label_column_name].names
lowercase__, lowercase__: int = {}, {}
for i, label in enumerate(SCREAMING_SNAKE_CASE__ ):
lowercase__: Union[str, Any] = str(SCREAMING_SNAKE_CASE__ )
lowercase__: Union[str, Any] = label
# Load the accuracy metric from the datasets package
lowercase__: Optional[Any] = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(__UpperCAmelCase ):
lowercase__: List[Any] = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=SCREAMING_SNAKE_CASE__ , references=eval_pred.label_ids )
lowercase__: List[str] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE__ ) , labelaid=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase__: int = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase__: Any = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase__: Dict = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(SCREAMING_SNAKE_CASE__ , output_all_columns=SCREAMING_SNAKE_CASE__ )
# Initialize our trainer
lowercase__: Optional[Any] = Trainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , )
# Training
if training_args.do_train:
lowercase__: Tuple = None
if training_args.resume_from_checkpoint is not None:
lowercase__: Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__: Tuple = last_checkpoint
lowercase__: Union[str, Any] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__: Any = trainer.evaluate()
trainer.log_metrics('''eval''' , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics('''eval''' , SCREAMING_SNAKE_CASE__ )
# Write model card and (optionally) push to hub
lowercase__: List[Any] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
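# Illustrative invocation of the script above (flag values are placeholders,
# not a tuned recipe):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval \
#       --learning_rate 3e-5 --max_length_seconds 1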
| 366 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__A = logging.get_logger(__name__)
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
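# The deprecation-shim pattern above in miniature: keep the old public name
# importable, emit a FutureWarning on construction, and delegate everything to
# the renamed class. NewProcessor/OldFeatureExtractor are illustrative names.
import warnings

class NewProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)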
| 2 | 0 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> list[list[int]]:
lowercase__: list[list[int]] = []
lowercase__: list[int] = []
lowercase__: str = 0
lowercase__: Dict = sum(__UpperCAmelCase )
create_state_space_tree(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return result
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> None:
if sum(__UpperCAmelCase ) > max_sum or (remaining_nums_sum + sum(__UpperCAmelCase )) < max_sum:
return
if sum(__UpperCAmelCase ) == max_sum:
result.append(__UpperCAmelCase )
return
for index in range(__UpperCAmelCase , len(__UpperCAmelCase ) ):
create_state_space_tree(
__UpperCAmelCase , __UpperCAmelCase , index + 1 , [*path, nums[index]] , __UpperCAmelCase , remaining_nums_sum - nums[index] , )
__A = [3, 3_4, 4, 1_2, 5, 2]
__A = 9
__A = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
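# For the sample inputs above, the qualifying subsets are [3, 4, 2] and [4, 5]
# (3 + 4 + 2 = 9 and 4 + 5 = 9), so the final line prints: [3, 4, 2] [4, 5]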
| 367 | """simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
__A = 2_5_6
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ["melgan"]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
super().__init__()
# From MELGAN
lowercase__: Union[str, Any] = math.log(1e-5 ) # Matches MelGAN training.
lowercase__: Union[str, Any] = 4.0 # Largest value for most examples
lowercase__: Union[str, Any] = 128
self.register_modules(
notes_encoder=_UpperCAmelCase , continuous_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase , scheduler=_UpperCAmelCase , melgan=_UpperCAmelCase , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ):
lowercase__, lowercase__: int = output_range
if clip:
lowercase__: Any = torch.clip(_UpperCAmelCase , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase__: Optional[int] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ):
lowercase__, lowercase__: str = input_range
lowercase__: Dict = torch.clip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if clip else outputs
# Scale to [0, 1].
lowercase__: Tuple = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = input_tokens > 0
lowercase__, lowercase__: str = self.notes_encoder(
encoder_input_tokens=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase )
lowercase__, lowercase__: Optional[int] = self.continuous_encoder(
encoder_inputs=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = noise_time
if not torch.is_tensor(_UpperCAmelCase ):
lowercase__: Tuple = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowercase__: str = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__: Dict = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase__: Union[str, Any] = self.decoder(
encodings_and_masks=_UpperCAmelCase , decoder_input_tokens=_UpperCAmelCase , decoder_noise_time=_UpperCAmelCase )
return logits
@torch.no_grad()
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = 100 , _UpperCAmelCase = True , _UpperCAmelCase = "numpy" , _UpperCAmelCase = None , _UpperCAmelCase = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(_UpperCAmelCase )}.""" )
lowercase__: List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase__: Any = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase__: Tuple = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device )
for i, encoder_input_tokens in enumerate(_UpperCAmelCase ):
if i == 0:
lowercase__: str = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase__: Optional[int] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase__: Union[str, Any] = ones
lowercase__: str = self.scale_features(
_UpperCAmelCase , output_range=[-1.0, 1.0] , clip=_UpperCAmelCase )
lowercase__: Dict = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_UpperCAmelCase , continuous_mask=_UpperCAmelCase , )
# Sample Gaussian noise with the shape of encoder_continuous_inputs to begin the denoising loop
lowercase__: int = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_UpperCAmelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__: List[Any] = self.decode(
encodings_and_masks=_UpperCAmelCase , input_tokens=_UpperCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase__: Union[str, Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
lowercase__: int = self.scale_to_features(_UpperCAmelCase , input_range=[-1.0, 1.0] )
lowercase__: Dict = mel[:1]
lowercase__: List[Any] = mel.cpu().float().numpy()
lowercase__: Optional[int] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCAmelCase , _UpperCAmelCase )
logger.info('''Generated segment''' , _UpperCAmelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'numpy\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'numpy\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
lowercase__: Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase__: Dict = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_UpperCAmelCase )
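# Standalone sketch of the min-max (de)normalisation pair above: scale_features
# maps [min_value, max_value] onto output_range, and scale_to_features inverts it.
# The constants mirror the pipeline (min = log(1e-5), max = 4.0).
import math

MIN_VALUE, MAX_VALUE = math.log(1e-5), 4.0

def scale_features(x: float, out=(-1.0, 1.0)) -> float:
    zero_one = (x - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)  # scale to [0, 1]
    return zero_one * (out[1] - out[0]) + out[0]  # scale to [min_out, max_out]

def scale_to_features(y: float, inp=(-1.0, 1.0)) -> float:
    zero_one = (y - inp[0]) / (inp[1] - inp[0])
    return zero_one * (MAX_VALUE - MIN_VALUE) + MIN_VALUE

assert abs(scale_to_features(scale_features(1.0)) - 1.0) < 1e-9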
| 2 | 0 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> list[list[float]]:
lowercase__: Tuple = Decimal
# Check whether the provided matrix is 2x2,
# since this branch implements the closed-form 2x2 inverse
if len(a__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowercase__: Union[str, Any] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
lowercase__: str = [[0.0, 0.0], [0.0, 0.0]]
lowercase__, lowercase__: Union[str, Any] = matrix[1][1], matrix[0][0]
lowercase__, lowercase__: str = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(a__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(a__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowercase__: Union[str, Any] = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
lowercase__: Dict = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowercase__: Optional[Any] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowercase__: List[Any] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowercase__: List[str] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowercase__: Dict = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowercase__: List[Any] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowercase__: str = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowercase__: List[str] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowercase__: str = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowercase__: Optional[int] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowercase__: Union[str, Any] = array(a__ )
for i in range(3 ):
for j in range(3 ):
lowercase__: Optional[Any] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowercase__: Optional[int] = array(a__ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(a__ )
# Calculate the inverse of the matrix
return [[float(d(a__ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
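

# A minimal usage sketch of the function above; the values are chosen so that
# the determinant is 1 and the inverse is exact:
#   inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]])
# The determinant is 2*3 - 1*5 = 1, so this returns [[3.0, -5.0], [-1.0, 2.0]].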
| 368 | """simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


class BloomOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
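

# A minimal sketch (an addition for illustration; it assumes the classes above
# are exported as BloomConfig / BloomOnnxConfig and that a tokenizer is in
# scope). BLOOM lays past keys out as (batch * n_head, head_dim, past_seq) while
# past values are (batch * n_head, past_seq, head_dim), which is why
# `inverted_values_shape=True` is passed to `fill_with_past_key_values_` above.
#   cfg = BloomConfig(n_layer=2, n_head=4, hidden_size=64)
#   onnx_cfg = BloomOnnxConfig(cfg, use_past=True)
#   dummy = onnx_cfg.generate_dummy_inputs(tokenizer, batch_size=1, seq_length=8)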
| 2 | 0 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # the projection head is used during self-supervised pre-training in MSN
    # and is not needed for downstream tasks
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
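    # Example invocation (a sketch; it assumes this script is saved as
    # convert_vit_msn_to_pytorch.py, and the output folder is a hypothetical path):
    #   python convert_vit_msn_to_pytorch.py \
    #       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
    #       --pytorch_dump_folder_path ./vit-msn-small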
| 369 | """simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
lowercase__: Dict = parent
lowercase__: Optional[int] = batch_size
lowercase__: List[str] = seq_length
lowercase__: Optional[int] = is_training
lowercase__: Dict = use_input_mask
lowercase__: List[Any] = use_token_type_ids
lowercase__: List[str] = use_labels
lowercase__: Union[str, Any] = vocab_size
lowercase__: str = hidden_size
lowercase__: Any = embedding_size
lowercase__: Any = num_hidden_layers
lowercase__: Any = num_attention_heads
lowercase__: List[Any] = intermediate_size
lowercase__: Dict = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: Optional[int] = max_position_embeddings
lowercase__: List[Any] = type_vocab_size
lowercase__: Tuple = type_sequence_label_size
lowercase__: Optional[int] = initializer_range
lowercase__: Dict = num_labels
lowercase__: int = num_choices
lowercase__: int = scope
def _snake_case ( self ):
lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: List[Any] = None
if self.use_input_mask:
lowercase__: Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: List[Any] = None
if self.use_token_type_ids:
lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__: Optional[Any] = None
lowercase__: Any = None
lowercase__: str = None
if self.use_labels:
lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: int = MobileBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: Dict = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: str = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[Any] = MobileBertForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: str = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: int = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Any = MobileBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Union[str, Any] = MobileBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = self.num_choices
lowercase__: Union[str, Any] = MobileBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Optional[Any] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :Optional[Any] = True
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
lowercase__: int = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
lowercase__: Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
lowercase__: Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def _snake_case ( self ):
lowercase__: int = MobileBertModelTester(self )
lowercase__: Dict = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ):
lowercase__: Tuple = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(_UpperCAmelCase )
lowercase__: Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
lowercase__: Tuple = model(_UpperCAmelCase )[0]
lowercase__: Dict = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__: List[Any] = torch.tensor(
[
[
[-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
[-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
[2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
]
] , device=_UpperCAmelCase , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowercase__: int = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowercase__: Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
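

# Worked example of the ratio check above (a reading aid, not part of the test):
# with TOLERANCE = 1e-3, a result passes when expected / result lies in
# [0.999, 1.001], i.e. for an expected value of 1e8 any result within roughly
# +/- 1e5 in absolute terms is accepted. An absolute-tolerance torch.allclose
# would need a per-element tolerance spanning eight orders of magnitude here.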
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
class Matrix:
    """simple docstring"""

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
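    # A short usage sketch of the class above (values chosen so the inverse is
    # exact despite the int() coercion in scalar multiplication):
    #   m = Matrix([[2, 1], [1, 1]])   # determinant = 2*1 - 1*1 = 1
    #   m.determinant()                # -> 1
    #   repr(m.inverse())              # -> '[[1, -1], [-1, 2]]', i.e. adjugate / det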
| 370 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
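
    # Worked example (a reading aid): with the default conv_stride of
    # (5, 2, 2, 2, 2, 2, 2), the property above returns 5 * 2**6 = 320, i.e. one
    # logit frame per 320 input samples (20 ms of audio at a 16 kHz sample rate).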
| 2 | 0 |
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent) -> bool:
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink) -> list[tuple[int, int]]:
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
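    # Worked example (a reading aid): test_graph is the classic CLRS flow
    # network, whose maximum flow from node 0 to node 5 is 23. The BFS-based
    # (Edmonds-Karp) augmentation saturates edges (1, 3), (4, 3) and (4, 5),
    # so the call above prints [(1, 3), (4, 3), (4, 5)].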
| 371 | """simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
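
    # Example invocation (a sketch; the checkpoint and output paths are
    # hypothetical local paths):
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
    #       --dump_path ./stable-diffusion-v1-5 --extract_ema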
| 2 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
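    # Worked check: for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10, both
    # functions evaluate 5*10**2 + 9.3*10**3 + 7*10**4
    # = 500 + 9300 + 70000 = 79800.0, Horner's method just does it with
    # len(poly) multiplications instead of one power per term.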
| 350 | """simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 2 | 0 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 351 | """simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
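

# A minimal sketch (an addition for illustration; it assumes the classes above
# are exported as CodeGenConfig / CodeGenOnnxConfig and a tokenizer is in
# scope). Unlike BLOOM's flattened past layout above, the dummy past states
# built here use one (batch, n_head, past_seq, head_dim) tensor shape for both
# keys and values.
#   cfg = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)
#   onnx_cfg = CodeGenOnnxConfig(cfg, use_past=True)
#   dummy = onnx_cfg.generate_dummy_inputs(tokenizer, batch_size=1, seq_length=8)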
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , ):
lowercase__: Dict = parent
lowercase__: Any = 13
lowercase__: Optional[int] = 7
lowercase__: Union[str, Any] = 30
lowercase__: Optional[Any] = self.seq_length + self.mem_len
lowercase__: Any = 15
lowercase__: Union[str, Any] = True
lowercase__: Tuple = True
lowercase__: str = 99
lowercase__: List[str] = [10, 50, 80]
lowercase__: Any = 32
lowercase__: List[str] = 32
lowercase__: Optional[Any] = 4
lowercase__: Any = 8
lowercase__: Dict = 128
lowercase__: Any = 2
lowercase__: Dict = 2
lowercase__: Union[str, Any] = None
lowercase__: Dict = 1
lowercase__: List[str] = 0
lowercase__: Dict = 3
lowercase__: List[Any] = self.vocab_size - 1
lowercase__: Optional[Any] = 0.01
def _snake_case ( self ):
lowercase__: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: List[Any] = None
if self.use_labels:
lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: Any = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _snake_case ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = TFTransfoXLModel(_UpperCAmelCase )
lowercase__, lowercase__: Union[str, Any] = model(_UpperCAmelCase ).to_tuple()
lowercase__: Optional[int] = {'''input_ids''': input_ids_a, '''mems''': mems_a}
lowercase__, lowercase__: Tuple = model(_UpperCAmelCase ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_lm_head( self , config , input_ids_a , input_ids_b , lm_labels ):
        model = TFTransfoXLLMHeadModel(config )
        lm_logits_a, mems_a = model(input_ids_a ).to_tuple()
        inputs = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
        lm_logits_a, mems_a = model(inputs ).to_tuple()
        lm_logits_b, mems_b = model([input_ids_b, mems_a] ).to_tuple()
        inputs = {'''input_ids''': input_ids_b, '''mems''': mems_a, '''labels''': lm_labels}
        lm_logits_b, mems_b = model(inputs ).to_tuple()
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_b.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_b] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def create_and_check_transfo_xl_for_sequence_classification( self , config , input_ids_a , input_ids_b , lm_labels ):
        model = TFTransfoXLForSequenceClassification(config )
        result = model(input_ids_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_a, input_ids_b, lm_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids_a}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False
    def setUp( self ):
        self.model_tester = TFTransfoXLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_transfo_xl_model( self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs )
    def test_transfo_xl_lm_head( self ):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs )
    def test_transfo_xl_sequence_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs )
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode( self ):
        pass
    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
    def test_keras_fit( self ):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase ):
"""simple docstring"""
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
    def test_lm_generate_transfo_xl_wt103( self ):
        model = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.int32 )  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids , max_length=200 , do_sample=False )
        self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
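# --- Added illustrative sketch (not part of the original test file) ---
# Demonstrates the `mems` round trip that create_and_check_transfo_xl_model
# asserts on: memories from one forward pass are fed back in with the next
# chunk of tokens. The checkpoint name matches the integration test above;
# the token values are arbitrary.
def _demo_transfo_xl_mems():
    model = TFTransfoXLModel.from_pretrained('''transfo-xl-wt103''' )
    out_a = model(tf.constant([[10, 20, 30]] ) )  # hidden states + fresh memories
    out_b = model({'''input_ids''': tf.constant([[40, 50]] ), '''mems''': out_a.mems} )
    return out_b.last_hidden_state.shape  # (batch, new_seq_len, d_model)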
| 352 | """simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
"""simple docstring"""
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."} ,)
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        } ,)
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        } ,)
    encoder_config_name: Optional[str] = field(
        default=None ,metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
    decoder_config_name: Optional[str] = field(
        default=None ,metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def main():
    parser = HfArgumentParser((ModelArguments,) )
    (model_args,) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
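# --- Added illustrative sketch (not part of the original script) ---
# How the artifacts written by main() could be loaded back; the directory
# name below is a hypothetical example, not something the script defines.
def _demo_reload(output_dir='''./vision-encoder-decoder''' ):
    model = FlaxVisionEncoderDecoderModel.from_pretrained(output_dir )
    tokenizer = AutoTokenizer.from_pretrained(output_dir )
    image_processor = AutoImageProcessor.from_pretrained(output_dir )
    return model, tokenizer, image_processor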
if __name__ == "__main__":
main()
| 2 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F"""Could not make batched video from {videos}""" )
class VivitImageProcessor(BaseImageProcessor ):
"""simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , offset = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ):
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size['''shortest_edge'''] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
        return center_crop(image , size=(size['''height'''], size['''width''']) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , offset = True , data_format = None , **kwargs , ):
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , data_format = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        if offset and not do_rescale:
            raise ValueError('''For offset, do_rescale must also be set to True.''' )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
        if not valid_images(videos ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
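# --- Added usage sketch (not part of the original file) ---
# make_batched() normalizes a single frame, one video, or a batch of videos
# into List[List[frame]]; the dummy frames below are assumptions for
# illustration.
def _demo_make_batched():
    frame = np.zeros((224, 224, 3) , dtype=np.uint8 )
    single = make_batched(frame )               # -> [[frame]]
    video = make_batched([frame, frame] )       # -> [[frame, frame]]
    batch = make_batched([[frame], [frame]] )   # already batched, unchanged
    assert len(single ) == 1 and len(single[0] ) == 1
    assert len(video ) == 1 and len(video[0] ) == 2
    assert len(batch ) == 2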
| 353 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig ):
"""simple docstring"""
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=246534 , n_positions=256 , n_embd=1280 , dff=8192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1e-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs )
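# --- Added usage sketch (not part of the original file) ---
# The attribute_map above lets the canonical config names resolve to CTRL's
# historical ones, assuming the attribute-aliasing behaviour of
# PretrainedConfig.
def _demo_ctrl_config():
    config = CTRLConfig(n_embd=64 , n_layer=2 , n_head=4 )
    assert config.hidden_size == config.n_embd == 64
    assert config.num_hidden_layers == config.n_layer == 2
    return config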
| 2 | 0 |
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0 )
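# --- Added usage sketch (not part of the original file) ---
# A quick worked example of the helpers above on 0b1010 (decimal 10);
# the function names come from the repaired definitions above.
def _demo_bit_ops():
    number = 0b1010
    assert set_bit(number , 0 ) == 0b1011      # turn bit 0 on
    assert clear_bit(number , 1 ) == 0b1000    # turn bit 1 off
    assert flip_bit(number , 3 ) == 0b0010     # toggle bit 3
    assert is_bit_set(number , 1 ) is True     # bit 1 is on
    assert get_bit(number , 2 ) == 0           # bit 2 is off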
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 | """simple docstring"""
def solution( length = 5_0 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
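# --- Added verification sketch (not part of the original solution) ---
# Cross-checks the tabulation above with a direct recursive count: for one
# tile length t, the number of rows with at least one coloured tile equals
# (tilings with unit black squares and length-t tiles) - 1 for the all-black
# row. The helper names are new.
def _brute_force_single_colour(row_length , tile_length ):
    def tilings(n ):
        if n < 0:
            return 0
        if n <= 1:
            return 1
        return tilings(n - 1 ) + tilings(n - tile_length )
    return tilings(row_length ) - 1
def _demo_check_solution(length=10 ):
    expected = sum(_brute_force_single_colour(length , t ) for t in range(2 , 5 ) )
    assert solution(length ) == expected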
if __name__ == "__main__":
print(f'''{solution() = }''')
| 2 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
            '''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = AlignProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = AlignProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , EfficientNetImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , EfficientNetImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        processor = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = AlignProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , EfficientNetImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , padding='''max_length''' , max_length=64 )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
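# --- Added usage sketch (not part of the original test file) ---
# The end-to-end call pattern the tests above exercise, against the public
# ALIGN checkpoint; the checkpoint name and dummy image are assumptions.
def _demo_align_usage():
    from transformers import AlignProcessor
    processor = AlignProcessor.from_pretrained('''kakaobrain/align-base''' )
    image = Image.fromarray(np.zeros((224, 224, 3) , dtype=np.uint8 ) )
    return processor(text='''a photo of a cat''' , images=image , return_tensors='''np''' )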
| 355 | """simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
"""simple docstring"""
    def __init__( self , conva_get , size_pa , bp_numa , bp_numb , bp_numc , rate_w=0.2 , rate_t=0.2 ):
        self.num_bpa = bp_numa
        self.num_bpb = bp_numb
        self.num_bpc = bp_numc
        self.conva = conva_get[:2]
        self.step_conva = conva_get[2]
        self.size_poolinga = size_pa
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conva = [
            np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
            for i in range(self.conva[1] )
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bpc , self.num_bpb ) + 0.5 )
        self.vji = np.mat(-1 * np.random.rand(self.num_bpb , self.num_bpa ) + 0.5 )
        self.thre_conva = -2 * np.random.rand(self.conva[1] ) + 1
        self.thre_bpa = -2 * np.random.rand(self.num_bpb ) + 1
        self.thre_bpb = -2 * np.random.rand(self.num_bpc ) + 1
    def save_model( self , save_path ):
        # save model dict with pickle
        model_dic = {
            '''num_bp1''': self.num_bpa,
            '''num_bp2''': self.num_bpb,
            '''num_bp3''': self.num_bpc,
            '''conv1''': self.conva,
            '''step_conv1''': self.step_conva,
            '''size_pooling1''': self.size_poolinga,
            '''rate_weight''': self.rate_weight,
            '''rate_thre''': self.rate_thre,
            '''w_conv1''': self.w_conva,
            '''wkj''': self.wkj,
            '''vji''': self.vji,
            '''thre_conv1''': self.thre_conva,
            '''thre_bp2''': self.thre_bpa,
            '''thre_bp3''': self.thre_bpb,
        }
        with open(save_path , '''wb''' ) as f:
            pickle.dump(model_dic , f )
        print(F"""Model saved: {save_path}""" )
    @classmethod
    def ReadModel( cls , model_path ):
        # read saved model
        with open(model_path , '''rb''' ) as f:
            model_dic = pickle.load(f )  # noqa: S301
        conv_get = model_dic.get('''conv1''' )
        conv_get.append(model_dic.get('''step_conv1''' ) )
        size_pa = model_dic.get('''size_pooling1''' )
        bp_numa = model_dic.get('''num_bp1''' )
        bp_numb = model_dic.get('''num_bp2''' )
        bp_numc = model_dic.get('''num_bp3''' )
        rate_w = model_dic.get('''rate_weight''' )
        rate_t = model_dic.get('''rate_thre''' )
        # create model instance
        conv_ins = CNN(conv_get , size_pa , bp_numa , bp_numb , bp_numc , rate_w , rate_t )
        # modify model parameter
        conv_ins.w_conva = model_dic.get('''w_conv1''' )
        conv_ins.wkj = model_dic.get('''wkj''' )
        conv_ins.vji = model_dic.get('''vji''' )
        conv_ins.thre_conva = model_dic.get('''thre_conv1''' )
        conv_ins.thre_bpa = model_dic.get('''thre_bp2''' )
        conv_ins.thre_bpb = model_dic.get('''thre_bp3''' )
        return conv_ins
    def sig( self , x ):
        return 1 / (1 + np.exp(-1 * x ))
    def do_round( self , x ):
        return round(x , 3 )
    def convolute( self , data , convs , w_convs , thre_convs , conv_step ):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data )[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0 , size_data - size_conv + 1 , conv_step ):
            for j_focus in range(0 , size_data - size_conv + 1 , conv_step ):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus )
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(num_conv ):
            featuremap = []
            for i_focus in range(len(data_focus ) ):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus ) )
            featuremap = np.asmatrix(featuremap ).reshape(
                size_feature_map , size_feature_map )
            data_featuremap.append(featuremap )
        # expanding the data slice to one dimension
        focusa_list = []
        for each_focus in data_focus:
            focusa_list.extend(self._expand_mat(each_focus ) )
        focus_list = np.asarray(focusa_list )
        return focus_list, data_featuremap
    def pooling( self , featuremaps , size_pooling , pooling_type="average_pool" ):
        # pooling process
        size_map = len(featuremaps[0] )
        size_pooled = int(size_map / size_pooling )
        featuremap_pooled = []
        for i_map in range(len(featuremaps ) ):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0 , size_map , size_pooling ):
                for j_focus in range(0 , size_map , size_pooling ):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus ) )
            map_pooled = np.asmatrix(map_pooled ).reshape(size_pooled , size_pooled )
            featuremap_pooled.append(map_pooled )
        return featuremap_pooled
    def _expand( self , data ):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data ) ):
            shapes = np.shape(data[i] )
            data_listed = data[i].reshape(1 , shapes[0] * shapes[1] )
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed )
        data_expanded = np.asarray(data_expanded )
        return data_expanded
    def _expand_mat( self , data_mat ):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat )
        shapes = np.shape(data_mat )
        data_expanded = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded
    def _calculate_gradient_from_pool( self , out_map , pd_pool , num_map , size_map , size_pooling ):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map ):
            pd_conv = np.ones((size_map, size_map) )
            for i in range(0 , size_map , size_pooling ):
                for j in range(0 , size_map , size_pooling ):
                    pd_conv[i : i + size_pooling , j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_delta = np.multiply(
                pd_conv , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(pd_delta )
        return pd_all
    def train(
        self , patterns , datas_train , datas_teach , n_repeat , error_accuracy , draw_e=bool ):
        # model training
        print('''----------------------Start Training-------------------------''' )
        print((''' - - Shape: Train_Data ''', np.shape(datas_train )) )
        print((''' - - Shape: Teach_Data ''', np.shape(datas_teach )) )
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(F"""-------------Learning Time {rp}--------------""" )
            for p in range(len(datas_train ) ):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p] )
                data_teach = np.asarray(datas_teach[p] )
                data_focus, data_conved = self.convolute(
                    data_train , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
                data_pooled = self.pooling(data_conved , self.size_poolinga )
                shape_featuremapa = np.shape(data_conved )
                data_bp_input = self._expand(data_pooled )
                bp_outa = data_bp_input
                bp_netb = np.dot(bp_outa , self.vji.T ) - self.thre_bpa
                bp_outb = self.sig(bp_netb )
                bp_netc = np.dot(bp_outb , self.wkj.T ) - self.thre_bpb
                bp_outc = self.sig(bp_netc )
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_outc) , np.multiply(bp_outc , (1 - bp_outc) ) )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all , self.wkj ) , np.multiply(bp_outb , (1 - bp_outb) ) )
                pd_i_all = np.dot(pd_j_all , self.vji )
                pd_conva_pooled = pd_i_all / (self.size_poolinga * self.size_poolinga)
                pd_conva_pooled = pd_conva_pooled.T.getA().tolist()
                pd_conva_all = self._calculate_gradient_from_pool(
                    data_conved , pd_conva_pooled , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conva[1] ):
                    pd_conv_list = self._expand_mat(pd_conva_all[k_conv] )
                    delta_w = self.rate_weight * np.dot(pd_conv_list , data_focus )
                    self.w_conva[k_conv] = self.w_conva[k_conv] + delta_w.reshape(
                        (self.conva[0], self.conva[0]) )
                    self.thre_conva[k_conv] = (
                        self.thre_conva[k_conv]
                        - np.sum(pd_conva_all[k_conv] ) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_outb * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_outa * self.rate_weight
                self.thre_bpb = self.thre_bpb - pd_k_all * self.rate_thre
                self.thre_bpa = self.thre_bpa - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_outc ) )
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse )
        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
            plt.plot(all_mse , '''+-''' )
            plt.plot(yplot , '''r--''' )
            plt.xlabel('''Learning Times''' )
            plt.ylabel('''All_mse''' )
            plt.grid(True , alpha=0.5 )
            plt.show()
        print('''------------------Training Complete---------------------''' )
        print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") )
        if draw_e:
            draw_error()
        return mse
    def predict( self , datas_test ):
        # model predict
        produce_out = []
        print('''-------------------Start Testing-------------------------''' )
        print((''' - - Shape: Test_Data ''', np.shape(datas_test )) )
        for p in range(len(datas_test ) ):
            data_test = np.asmatrix(datas_test[p] )
            data_focus, data_conved = self.convolute(
                data_test , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
            data_pooled = self.pooling(data_conved , self.size_poolinga )
            data_bp_input = self._expand(data_pooled )
            bp_outa = data_bp_input
            bp_netb = bp_outa * self.vji.T - self.thre_bpa
            bp_outb = self.sig(bp_netb )
            bp_netc = bp_outb * self.wkj.T - self.thre_bpb
            bp_outc = self.sig(bp_netc )
            produce_out.extend(bp_outc.getA().tolist() )
        res = [list(map(self.do_round , each ) ) for each in produce_out]
        return np.asarray(res )
    def convolution( self , data ):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data )
        data_focus, data_conveda = self.convolute(
            data_test , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
        data_pooleda = self.pooling(data_conveda , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
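# --- Added illustrative sketch (not part of the original file) ---
# The sliding-window computation that CNN.convolute() performs, reduced to a
# single hand-written 2x2 kernel in plain numpy.
def _demo_single_convolution():
    data = np.arange(16 ).reshape(4 , 4 )
    kernel = np.ones((2, 2) )
    step = 2
    out_size = (4 - 2) // step + 1
    feature_map = np.zeros((out_size, out_size) )
    for i in range(out_size ):
        for j in range(out_size ):
            patch = data[i * step : i * step + 2 , j * step : j * step + 2]
            feature_map[i , j] = np.sum(patch * kernel )  # one receptive-field response
    return feature_map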
| 2 | 0 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask( masked_input , model , tokenizer , topk=5 ):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count('''<mask>''' ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values, indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = ''' '''.join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
        predicted_token = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(''' {0}'''.format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
masked_input = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
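# --- Added cross-check sketch (not part of the original script) ---
# The same task through the high-level pipeline API; wrapped in a function so
# it only runs when explicitly called.
def _demo_pipeline_fill_mask():
    from transformers import pipeline
    camembert_fill_mask = pipeline('''fill-mask''' , model='''camembert-base''' )
    return camembert_fill_mask('''Le camembert est <mask> :)''' )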
| 356 | """simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
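# --- Added illustrative sketch (not part of the original test file) ---
# A simplified, single-pass application of the merge list written in setUp,
# showing how BPE turns the characters of "adapt" into one token. The helper
# is new and not how CTRLTokenizer is implemented internally.
def _demo_bpe_merges():
    merges = [('''a''', '''p'''), ('''ap''', '''t</w>'''), ('''r''', '''e'''), ('''a''', '''d'''), ('''ad''', '''apt</w>''')]
    word = ['''a''', '''d''', '''a''', '''p''', '''t</w>''']  # characters of "adapt"
    for left, right in merges:
        merged = []
        i = 0
        while i < len(word ):
            if i < len(word ) - 1 and word[i] == left and word[i + 1] == right:
                merged.append(left + right )
                i += 2
            else:
                merged.append(word[i] )
                i += 1
        word = merged
    return word  # -> ['adapt</w>']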
| 2 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values
    def create_and_check_model( self , config , pixel_values ):
        model = FlaxViTModel(config=config )
        result = model(pixel_values )
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values ):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_jit_compilation( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )
                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
            outputs = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(outputs )
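# --- Added illustrative sketch (not part of the original test file) ---
# The jit pattern test_jit_compilation exercises, shown on a tiny pure
# function: the first call compiles, later calls reuse the compiled graph.
def _demo_jax_jit():
    import jax.numpy as jnp
    @jax.jit
    def forward(x ):
        return jnp.tanh(x ) * 2.0
    out = forward(jnp.ones((2, 2) ) )  # triggers compilation
    return out.shape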
| 357 | """simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def convert_command_factory( args: Namespace ):
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand(BaseDatasetsCLICommand ):
"""simple docstring"""
    @staticmethod
    def register_subcommand( parser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=str , required=True , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=str , required=True , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , tfds_path , datasets_directory , *args ):
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F"""Looking at file {f_name}""" )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(input_file , encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    out_line = '''from . import ''' + match.group(1 )
                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F"""Error converting {out_line.strip()}""" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('''.py''' , '''''' )
                output_dir = os.path.join(abs_datasets_path , dir_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(F"""Adding directory {output_dir}""" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
            self._logger.info(F"""Converted in {output_file}""" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 2 | 0 |
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 358 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = "cvt"
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Dict = num_channels
lowercase__: str = patch_sizes
lowercase__: Optional[Any] = patch_stride
lowercase__: List[str] = patch_padding
lowercase__: Optional[Any] = embed_dim
lowercase__: Optional[int] = num_heads
lowercase__: Any = depth
lowercase__: str = mlp_ratio
lowercase__: Any = attention_drop_rate
lowercase__: Any = drop_rate
lowercase__: Optional[Any] = drop_path_rate
lowercase__: Dict = qkv_bias
lowercase__: Dict = cls_token
lowercase__: Any = qkv_projection_method
lowercase__: List[str] = kernel_qkv
lowercase__: Union[str, Any] = padding_kv
lowercase__: Optional[int] = stride_kv
lowercase__: int = padding_q
lowercase__: Dict = stride_q
lowercase__: Any = initializer_range
lowercase__: Union[str, Any] = layer_norm_eps
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , ):
lowercase__: List[Any] = parent
lowercase__: str = 13
lowercase__: Tuple = 7
lowercase__: Tuple = True
lowercase__: Union[str, Any] = True
lowercase__: Optional[Any] = False
lowercase__: Union[str, Any] = True
lowercase__: Union[str, Any] = 99
lowercase__: Any = 32
lowercase__: Optional[Any] = 2
lowercase__: Union[str, Any] = 4
lowercase__: int = 37
lowercase__: Any = """gelu"""
lowercase__: Tuple = 0.1
lowercase__: Optional[Any] = 0.1
lowercase__: List[str] = 512
lowercase__: Tuple = 16
lowercase__: Optional[Any] = 2
lowercase__: Any = 0.02
lowercase__: List[str] = 3
lowercase__: Dict = 4
lowercase__: Dict = None
def _snake_case ( self ):
lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: Dict = None
if self.use_input_mask:
lowercase__: List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: Tuple = None
lowercase__: Optional[Any] = None
lowercase__: Any = None
if self.use_labels:
lowercase__: List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: str = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: List[str] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = TFDistilBertModel(config=_SCREAMING_SNAKE_CASE )
lowercase__: List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowercase__: List[Any] = model(_SCREAMING_SNAKE_CASE )
lowercase__: List[str] = [input_ids, input_mask]
lowercase__: int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = TFDistilBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
lowercase__: Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowercase__: Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Union[str, Any] = TFDistilBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
lowercase__: Optional[int] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
}
lowercase__: Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Union[str, Any] = self.num_labels
lowercase__: Optional[int] = TFDistilBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
lowercase__: List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowercase__: Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = self.num_choices
lowercase__: Tuple = TFDistilBertForMultipleChoice(_SCREAMING_SNAKE_CASE )
lowercase__: List[str] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
lowercase__: Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
lowercase__: int = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
}
lowercase__: Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = self.num_labels
lowercase__: Any = TFDistilBertForTokenClassification(_SCREAMING_SNAKE_CASE )
lowercase__: Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowercase__: List[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self ):
lowercase__: Tuple = self.prepare_config_and_inputs()
(lowercase__): Dict = config_and_inputs
lowercase__: Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase (UpperCAmelCase__ ,UpperCAmelCase__ ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :List[Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
_UpperCAmelCase :Tuple = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCAmelCase :List[Any] = False
_UpperCAmelCase :int = False
def _snake_case ( self ):
lowercase__: Tuple = TFDistilBertModelTester(self )
lowercase__: Dict = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , dim=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
lowercase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowercase__: Tuple = TFDistilBertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_tf
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ):
lowercase__: str = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase__: Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase__: int = model(_SCREAMING_SNAKE_CASE )[0]
lowercase__: List[Any] = [1, 6, 768]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
lowercase__: List[Any] = tf.constant(
[
[
[0.19_261_885, -0.13_732_955, 0.4_119_799],
[0.22_150_156, -0.07_422_661, 0.39_037_204],
[0.22_756_018, -0.0_896_414, 0.3_701_467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 )
| 359 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = "rag"
_UpperCAmelCase :List[Any] = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=" / " , _UpperCAmelCase=" // " , _UpperCAmelCase=5 , _UpperCAmelCase=300 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase="wiki_dpr" , _UpperCAmelCase="train" , _UpperCAmelCase="compressed" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(
bos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , prefix=_UpperCAmelCase , vocab_size=_UpperCAmelCase , **_UpperCAmelCase , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
lowercase__: Optional[Any] = kwargs.pop('''question_encoder''' )
lowercase__: Any = question_encoder_config.pop('''model_type''' )
lowercase__: Tuple = kwargs.pop('''generator''' )
lowercase__: Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
lowercase__: Optional[int] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: Any = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: str = reduce_loss
lowercase__: str = label_smoothing
lowercase__: Dict = exclude_bos_score
lowercase__: Any = do_marginalize
lowercase__: Optional[int] = title_sep
lowercase__: Any = doc_sep
lowercase__: Any = n_docs
lowercase__: List[Any] = max_combined_length
lowercase__: int = dataset
lowercase__: int = dataset_split
lowercase__: str = index_name
lowercase__: Dict = retrieval_vector_size
lowercase__: Dict = retrieval_batch_size
lowercase__: List[str] = passages_path
lowercase__: str = index_path
lowercase__: Optional[Any] = use_dummy_dataset
lowercase__: str = output_retrieved
lowercase__: List[str] = do_deduplication
lowercase__: List[Any] = use_cache
if self.forced_eos_token_id is None:
lowercase__: int = getattr(self.generator , '''forced_eos_token_id''' , _UpperCAmelCase )
@classmethod
def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = copy.deepcopy(self.__dict__ )
lowercase__: str = self.question_encoder.to_dict()
lowercase__: str = self.generator.to_dict()
lowercase__: str = self.__class__.model_type
return output
| 2 | 0 |
"""simple docstring"""
from random import randint, random
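# A Nagel-Schreckenberg-style cellular-automaton traffic simulation: the
# highway is a list of cells, each holding a car's current speed or -1 when empty.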
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = 5 , ) -> Union[str, Any]:
lowercase__ = [[-1] * number_of_cells] # Create a highway without any car
lowercase__ = 0
lowercase__ = max(_a , 0 )
while i < number_of_cells:
lowercase__ = (
randint(0 , _a ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
lowercase__ = 0
lowercase__ = highway_now[car_index + 1 :]
for cell in range(len(_a ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
    # We only reach this point when the car is near the end of the highway: wrap around
return distance + get_distance(_a , -1 )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
lowercase__ = len(_a )
    # Before the calculations, the highway is empty
lowercase__ = [-1] * number_of_cells
for car_index in range(_a ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
lowercase__ = min(highway_now[car_index] + 1 , _a )
            # Number of empty cells before the next car
lowercase__ = get_distance(_a , _a ) - 1
# We can't have the car causing an accident
lowercase__ = min(next_highway[car_index] , _a )
if random() < probability:
# Randomly, a driver will slow down
lowercase__ = max(next_highway[car_index] - 1 , 0 )
return next_highway
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
lowercase__ = len(highway[0] )
for i in range(_a ):
lowercase__ = update(highway[i] , _a , _a )
lowercase__ = [-1] * number_of_cells
for car_index in range(_a ):
lowercase__ = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
lowercase__ = (car_index + speed) % number_of_cells
# Commit the change of position
lowercase__ = speed
highway.append(_a )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 | """simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
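# Tiny reference repo, its expected cache location and a known commit hash,
# shared by the caching tests below.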
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: Union[str, Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: Dict = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(os.path.isfile(_UpperCAmelCase ) )
# File is cached at the same place the second time.
lowercase__: Any = cached_file(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
# Using a specific revision to test the full commit hash.
lowercase__: Dict = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''9b8c223''' )
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
lowercase__: int = cached_file('''tiny-random-bert''' , _UpperCAmelCase )
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
lowercase__: List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''aaaa''' )
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Optional[Any] = cached_file(_UpperCAmelCase , '''conf''' )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: int = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''.no_exist''' , _UpperCAmelCase , '''conf''' ) ) )
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: List[str] = cached_file(_UpperCAmelCase , '''conf''' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: Union[str, Any] = mock.Mock()
lowercase__: str = 500
lowercase__: Union[str, Any] = {}
lowercase__: List[str] = HTTPError
lowercase__: int = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_UpperCAmelCase ) as mock_head:
lowercase__: Any = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
            # This checks that we did call the fake head request
mock_head.assert_called()
def _snake_case ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
def _snake_case ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , _UpperCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase , revision='''ahaha''' )
lowercase__: Optional[Any] = get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase__: Optional[Any] = json.loads(open(_UpperCAmelCase , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__: Any = Path(_UpperCAmelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , '''a.txt''' ) , str(_UpperCAmelCase ) )
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , '''b.txt''' ) )
| 2 | 0 |
"""simple docstring"""
import sys
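# Classic O(n^3) dynamic program for the matrix-chain multiplication problem:
# matrix[a][b] stores the minimal number of scalar multiplications for the
# sub-chain a..b, and sol[a][b] the split point that achieves it.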
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Any:
lowercase__: Optional[Any] = len(__lowerCAmelCase )
lowercase__: List[str] = [[0 for x in range(__lowerCAmelCase )] for x in range(__lowerCAmelCase )]
lowercase__: List[Any] = [[0 for x in range(__lowerCAmelCase )] for x in range(__lowerCAmelCase )]
for chain_length in range(2 , __lowerCAmelCase ):
for a in range(1 , n - chain_length + 1 ):
lowercase__: str = a + chain_length - 1
lowercase__: Union[str, Any] = sys.maxsize
for c in range(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__: List[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowercase__: Optional[Any] = cost
lowercase__: Dict = c
return matrix, sol
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
if i == j:
print('''A''' + str(__lowerCAmelCase ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optiomal_solution(__lowerCAmelCase , __lowerCAmelCase , optimal_solution[i][j] )
print_optiomal_solution(__lowerCAmelCase , optimal_solution[i][j] + 1 , __lowerCAmelCase )
print(''')''' , end=''' ''' )
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
lowercase__: Dict = [3_0, 3_5, 1_5, 5, 1_0, 2_0, 2_5]
lowercase__: Optional[Any] = len(__lowerCAmelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
lowercase__: Optional[int] = matrix_chain_order(__lowerCAmelCase )
    print('''No. of Operations required: ''' + str(matrix[1][n - 1] ) )
print_optiomal_solution(__lowerCAmelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
| 361 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "beit"
def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Union[str, Any] = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: int = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: List[str] = initializer_range
lowercase__: Optional[int] = layer_norm_eps
lowercase__: int = image_size
lowercase__: Tuple = patch_size
lowercase__: int = num_channels
lowercase__: Optional[Any] = use_mask_token
lowercase__: List[Any] = use_absolute_position_embeddings
lowercase__: Optional[int] = use_relative_position_bias
lowercase__: Optional[int] = use_shared_relative_position_bias
lowercase__: Optional[Any] = layer_scale_init_value
lowercase__: Union[str, Any] = drop_path_rate
lowercase__: Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__: Tuple = out_indices
lowercase__: Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__: List[str] = use_auxiliary_head
lowercase__: Optional[Any] = auxiliary_loss_weight
lowercase__: str = auxiliary_channels
lowercase__: List[str] = auxiliary_num_convs
lowercase__: Tuple = auxiliary_concat_input
lowercase__: Dict = semantic_loss_ignore_index
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = version.parse("1.11" )
@property
def _snake_case ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
return 1e-4
| 2 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> float:
if edge <= 0 or not isinstance(_lowercase , _lowercase ):
        raise ValueError('''Length must be positive.''' )
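    # Surface area of a regular dodecahedron with edge length `edge`:
    # 3 * sqrt(25 + 10 * sqrt(5)) * edge**2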
return 3 * ((2_5 + 1_0 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> float:
if edge <= 0 or not isinstance(_lowercase , _lowercase ):
        raise ValueError('''Length must be positive.''' )
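    # Volume of a regular dodecahedron with edge length `edge`:
    # (15 + 7 * sqrt(5)) / 4 * edge**3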
return ((1_5 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 | """simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowercase__: int = ''''''
for word_or_phrase in separated:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise Exception('''join() accepts only strings to be joined''' )
joined += word_or_phrase + separator
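    # str.strip removes any of the separator's characters from both ends,
    # dropping the trailing separator appended by the loop above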
return joined.strip(__UpperCAmelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 2 | 0 |
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
__A = direct_transformers_import(PATH_TO_TRANSFORMERS)
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict:
lowercase__: List[str] = None
# source code of `config_class`
lowercase__: List[Any] = inspect.getsource(__UpperCAmelCase )
lowercase__: str = _re_checkpoint.findall(__UpperCAmelCase )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('''/''' ):
lowercase__: Dict = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowercase__: Optional[Any] = F"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowercase__: Union[str, Any] = ckpt_name
break
return checkpoint
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
lowercase__: Dict = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowercase__: Dict = get_checkpoint_from_config_class(__UpperCAmelCase )
lowercase__: Optional[int] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
lowercase__: str = '''\n'''.join(sorted(__UpperCAmelCase ) )
raise ValueError(F"""The following configurations don\'t contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 363 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = StableDiffusionPanoramaPipeline
_UpperCAmelCase :List[str] = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase__: List[Any] = DDIMScheduler()
torch.manual_seed(0 )
lowercase__: Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__: Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__: List[str] = CLIPTextModel(_UpperCAmelCase )
lowercase__: int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__: int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
lowercase__: int = torch.manual_seed(_UpperCAmelCase )
lowercase__: List[Any] = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[str] = self.get_dummy_components()
lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = '''french fries'''
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
lowercase__: Optional[Any] = output.images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 )
lowercase__: List[str] = output.images
lowercase__: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: int = self.get_dummy_components()
lowercase__: List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[Any] = self.get_dummy_components()
lowercase__: Any = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase )
lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _UpperCAmelCase=0 ):
lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
lowercase__: int = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Any = '''stabilityai/stable-diffusion-2-base'''
lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase )
lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: List[str] = self.get_inputs()
lowercase__: Dict = pipe(**_UpperCAmelCase ).images
lowercase__: Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _snake_case ( self ):
lowercase__: int = 0
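        # the callback fires once per denoising step; we check the intermediate
        # latents of the wide (1, 4, 64, 256) panorama latent grid below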
def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
lowercase__: List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__: Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Any = latents[0, -3:, -3:, -1]
lowercase__: List[Any] = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase__: Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Optional[Any] = latents[0, -3:, -3:, -1]
lowercase__: Any = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase__: int = False
lowercase__: str = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: List[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__: Any = self.get_inputs()
lowercase__: List[str] = pipe(**_UpperCAmelCase )
lowercase__: Optional[int] = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 2 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class UpperCAmelCase (__lowerCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :List[Any] = '''cvt'''
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ):
super().__init__(**lowerCAmelCase_ )
lowercase__: Union[str, Any] = num_channels
lowercase__: Optional[Any] = patch_sizes
lowercase__: List[str] = patch_stride
lowercase__: int = patch_padding
lowercase__: Tuple = embed_dim
lowercase__: str = num_heads
lowercase__: List[Any] = depth
lowercase__: Tuple = mlp_ratio
lowercase__: Dict = attention_drop_rate
lowercase__: Optional[int] = drop_rate
lowercase__: Tuple = drop_path_rate
lowercase__: str = qkv_bias
lowercase__: List[str] = cls_token
lowercase__: Optional[int] = qkv_projection_method
lowercase__: Union[str, Any] = kernel_qkv
lowercase__: Any = padding_kv
lowercase__: int = stride_kv
lowercase__: Any = padding_q
lowercase__: Dict = stride_q
lowercase__: Union[str, Any] = initializer_range
lowercase__: Union[str, Any] = layer_norm_eps
| 364 | """simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = DebertaVaTokenizer
_UpperCAmelCase :Tuple = DebertaVaTokenizerFast
_UpperCAmelCase :int = True
_UpperCAmelCase :int = True
def _snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = '''this is a test'''
lowercase__: int = '''this is a test'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: Optional[int] = '''<pad>'''
lowercase__: Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_UpperCAmelCase ) , 30001 )
def _snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case ( self ):
# fmt: off
lowercase__: int = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# fmt: off
lowercase__: Dict = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Any = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.'''
lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.get_tokenizer()
lowercase__: List[Any] = self.get_rust_tokenizer()
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.get_rust_tokenizer()
lowercase__: str = tokenizer.encode(_UpperCAmelCase )
lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''This is a test'''
lowercase__: str = [13, 1, 4398, 25, 21, 1289]
lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# fmt: off
lowercase__: str = '''I was born in 92000, and this is falsé.'''
lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
    def _snake_case ( self ):
        tokenizer = DebertaVaTokenizer(_UpperCAmelCase )
        text = tokenizer.encode('''sequence builders''' )
        text_a = tokenizer.encode('''multi-sequence build''' )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , encoded_sentence )
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , encoded_pair , )
@slow
def _snake_case ( self ):
# fmt: off
lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch '''
            '''helper utility that will spawn up '''
            '''multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' , type=int , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
    # positional
    parser.add_argument(
        '''training_script''' , type=str , help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ) , )
    # rest from the training program
    parser.add_argument('''training_script_args''' , nargs=REMAINDER )
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
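# Usage sketch (hypothetical invocation; the file name and example script are assumptions,
# everything else follows the parser and main() above):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
#
# Everything after the script path is forwarded to the training script via sys.argv, with
# `--tpu_num_cores` appended, and xmp.spawn then runs the script's `_mp_fn` once per core.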
| 365 | """simple docstring"""
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"
class UpperCAmelCase (unittest.TestCase ):
    """simple docstring"""
    def setUp( self ):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME )
    def test_token2json( self ):
        expected_json = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        sequence = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
| 2 | 0 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 3_0
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state='''closed''' )
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state='''open''' )
            issue.remove_from_labels('''stale''' )
        elif (
            (dt.utcnow() - issue.updated_at).days > 2_3
            and (dt.utcnow() - issue.created_at).days >= 3_0
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                '''This issue has been automatically marked as stale because it has not had '''
                '''recent activity. If you think this still needs to be addressed '''
                '''please comment on this thread.\n\nPlease note that issues that do not follow the '''
                '''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
                '''are likely to be ignored.''' )
            issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
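# Runtime assumption (not stated elsewhere in this script): a GITHUB_TOKEN environment
# variable with access to huggingface/diffusers must be exported before it runs, e.g.
#
#   GITHUB_TOKEN=<personal-access-token> python stale.py   # file name is illustrative
#
# One pass then closes issues quiet for more than 7 days after the bot's notice, un-stales
# issues with fresh human comments, and posts the stale notice after 23 days of inactivity.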
| 366 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__A = logging.get_logger(__name__)
class UpperCAmelCase (VideoMAEImageProcessor ):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
        warnings.warn(
            '''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use VideoMAEImageProcessor instead.''' , FutureWarning , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 2 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 4_0_9_6,
"allenai/longformer-large-4096": 4_0_9_6,
"allenai/longformer-large-4096-finetuned-triviaqa": 4_0_9_6,
"allenai/longformer-base-4096-extra.pos.embd.only": 4_0_9_6,
"allenai/longformer-large-4096-extra.pos.embd.only": 4_0_9_6,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
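# Property check (illustrative sketch, not part of the public API): bytes_to_unicode
# returns a bijection over all 256 byte values, so every byte maps to a distinct
# printable character and the decoder can invert it exactly.
#
#   b2u = bytes_to_unicode()
#   assert len(b2u) == 256 and len(set(b2u.values())) == 256  # one-to-one over every byte
#   u2b = {v: k for k, v in b2u.items()}  # the inverse map kept as byte_decoder below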
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCAmelCase (PreTrainedTokenizer ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="replace" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=False , **_UpperCAmelCase , ):
lowercase__: Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token
lowercase__: int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token
lowercase__: Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token
lowercase__: Union[str, Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token
lowercase__: Dict = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token
lowercase__: List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__: Dict = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , )
with open(_UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase__: List[Any] = json.load(_UpperCAmelCase )
lowercase__: Dict = {v: k for k, v in self.encoder.items()}
lowercase__: List[str] = errors # how to handle errors in decoding
lowercase__: List[str] = bytes_to_unicode()
lowercase__: int = {v: k for k, v in self.byte_encoder.items()}
with open(_UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase__: Union[str, Any] = merges_handle.read().split('''\n''' )[1:-1]
lowercase__: List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__: str = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
lowercase__: Optional[int] = {}
lowercase__: Optional[Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__: Tuple = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _snake_case ( self ):
return len(self.encoder )
def _snake_case ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ' '.join(word )
        self.cache[token] = word
        return word
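    # Worked example with an assumed merge table (not shipped with this file): given
    # bpe_ranks = {("h", "e"): 0, ("l", "l"): 1, ("he", "ll"): 2, ("hell", "o"): 3},
    # bpe("hello") merges ("h", "e") -> "he", then ("l", "l") -> "ll", then
    # ("he", "ll") -> "hell", then ("hell", "o") -> "hello" and returns "hello";
    # with an empty merge table the first `bigram not in self.bpe_ranks` check fires
    # immediately and the token comes back space-separated as "h e l l o".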
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
def _snake_case ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Tuple = ''.join(_UpperCAmelCase )
lowercase__: Dict = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__: Dict = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__: str = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + '''\n''' )
lowercase__: Dict = 0
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowercase__: List[str] = token_index
writer.write(''' '''.join(_UpperCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__: List[str] = [self.cls_token_id]
lowercase__: Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
lowercase__: Any = [self.sep_token_id]
lowercase__: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False , **_UpperCAmelCase ):
lowercase__: Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()):
lowercase__: Optional[int] = ' ' + text
return (text, kwargs)
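# Minimal usage sketch (the vocab/merges paths are placeholders, not shipped with this file):
#
#   tok = UpperCAmelCase(vocab_file='''vocab.json''' , merges_file='''merges.txt''' )
#   tokens = tok._tokenize('''Hello world''' )           # byte-level BPE pieces
#   ids = [tok._convert_token_to_id(t ) for t in tokens]
#
# Setting add_prefix_space=True makes a leading word tokenize like a mid-sentence one.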
| 367 | """simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 2_5_6
class UpperCAmelCase (DiffusionPipeline ):
"""simple docstring"""
_UpperCAmelCase :int = ["melgan"]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
super().__init__()
# From MELGAN
lowercase__: Union[str, Any] = math.log(1e-5 ) # Matches MelGAN training.
lowercase__: Union[str, Any] = 4.0 # Largest value for most examples
lowercase__: Union[str, Any] = 128
self.register_modules(
notes_encoder=_UpperCAmelCase , continuous_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase , scheduler=_UpperCAmelCase , melgan=_UpperCAmelCase , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ):
lowercase__, lowercase__: int = output_range
if clip:
lowercase__: Any = torch.clip(_UpperCAmelCase , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase__: Optional[int] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ):
lowercase__, lowercase__: str = input_range
lowercase__: Dict = torch.clip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if clip else outputs
# Scale to [0, 1].
lowercase__: Tuple = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
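    # Note (round numbers for illustration): scale_features and scale_to_features are
    # inverse affine maps between [self.min_value, self.max_value] and a chosen
    # [min_out, max_out]. With min_value = log(1e-5) ~ -11.51 and max_value = 4.0 set in
    # __init__, scaling to [-1.0, 1.0] sends -11.51 -> -1.0 and 4.0 -> 1.0, and applying
    # scale_to_features to the result recovers the original features (up to clipping).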
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = input_tokens > 0
lowercase__, lowercase__: str = self.notes_encoder(
encoder_input_tokens=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase )
lowercase__, lowercase__: Optional[int] = self.continuous_encoder(
encoder_inputs=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = noise_time
if not torch.is_tensor(_UpperCAmelCase ):
lowercase__: Tuple = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowercase__: str = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__: Dict = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase__: Union[str, Any] = self.decoder(
encodings_and_masks=_UpperCAmelCase , decoder_input_tokens=_UpperCAmelCase , decoder_noise_time=_UpperCAmelCase )
return logits
@torch.no_grad()
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = 100 , _UpperCAmelCase = True , _UpperCAmelCase = "numpy" , _UpperCAmelCase = None , _UpperCAmelCase = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(_UpperCAmelCase )}.""" )
lowercase__: List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase__: Any = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase__: Tuple = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device )
for i, encoder_input_tokens in enumerate(_UpperCAmelCase ):
if i == 0:
lowercase__: str = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase__: Optional[int] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase__: Union[str, Any] = ones
lowercase__: str = self.scale_features(
_UpperCAmelCase , output_range=[-1.0, 1.0] , clip=_UpperCAmelCase )
lowercase__: Dict = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_UpperCAmelCase , continuous_mask=_UpperCAmelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase__: int = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_UpperCAmelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__: List[Any] = self.decode(
encodings_and_masks=_UpperCAmelCase , input_tokens=_UpperCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase__: Union[str, Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
lowercase__: int = self.scale_to_features(_UpperCAmelCase , input_range=[-1.0, 1.0] )
lowercase__: Dict = mel[:1]
lowercase__: List[Any] = mel.cpu().float().numpy()
lowercase__: Optional[int] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCAmelCase , _UpperCAmelCase )
logger.info('''Generated segment''' , _UpperCAmelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
lowercase__: Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase__: Dict = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_UpperCAmelCase )
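# Call sketch (argument values are illustrative; the signature is the __call__ above):
#
#   output = pipe(input_tokens , num_inference_steps=100 , output_type='''numpy''' )
#   audio = output.audios[0]  # MelGAN-vocoded waveform when output_type == "numpy"
#
# where `pipe` is an instance of this pipeline and `input_tokens` is a list of note-token
# chunks; each chunk is denoised conditioned on the previous chunk's predicted mel slice.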
| 2 | 0 |
from __future__ import annotations
import math
def prime_sieve(num ) -> list:
    if num <= 0:
        msg = F"""{num}: Invalid input, please enter a positive integer."""
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 368 | """simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__A = logging.get_logger(__name__)
__A = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class UpperCAmelCase (PretrainedConfig ):
"""simple docstring"""
_UpperCAmelCase :str = "bloom"
_UpperCAmelCase :List[str] = ["past_key_values"]
_UpperCAmelCase :Optional[Any] = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self , _UpperCAmelCase=250880 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=8 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase , ):
lowercase__: Any = vocab_size
# Backward compatibility with n_embed kwarg
lowercase__: Optional[Any] = kwargs.pop('''n_embed''' , _UpperCAmelCase )
lowercase__: int = hidden_size if n_embed is None else n_embed
lowercase__: int = n_layer
lowercase__: int = n_head
lowercase__: Optional[Any] = layer_norm_epsilon
lowercase__: int = initializer_range
lowercase__: List[Any] = use_cache
lowercase__: str = pretraining_tp
lowercase__: Tuple = apply_residual_connection_post_layernorm
lowercase__: int = hidden_dropout
lowercase__: Optional[Any] = attention_dropout
lowercase__: int = bos_token_id
lowercase__: Union[str, Any] = eos_token_id
lowercase__: Any = slow_but_exact
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
class UpperCAmelCase (OnnxConfigWithPast ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse("1.12" )
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ):
# TODO: how to do that better?
lowercase__: Any = 0
@property
def _snake_case ( self ):
lowercase__: str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' , inverted_values_shape=_UpperCAmelCase )
lowercase__: List[str] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
lowercase__: str = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _snake_case ( self ):
return self._config.n_layer
@property
def _snake_case ( self ):
return self._config.n_head
@property
def _snake_case ( self ):
return 1e-3
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
lowercase__: str = super(_UpperCAmelCase , self ).generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase__, lowercase__: Optional[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase__: Tuple = seqlen + 2
lowercase__: str = self._config.hidden_size // self.num_attention_heads
lowercase__: Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
lowercase__: Union[str, Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
lowercase__: str = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
lowercase__: Tuple = common_inputs['''attention_mask''']
if self.use_past:
lowercase__: int = ordered_inputs['''attention_mask'''].dtype
lowercase__: List[str] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self ):
return 13
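# Shape walk-through (derived from the defaults above, for illustration only): with
# hidden_size=64 and n_head=8, head_dim = 64 // 8 = 8; for a dummy batch of 2 and
# seq_length 5, past_key_values_length = 5 + 2 = 7, so each past key has shape
# (2 * 8, 8, 7) = (16, 8, 7) and each past value (2 * 8, 7, 8) = (16, 7, 8), matching the
# dynamic-axis-2 layout the `inputs` property registers for BLOOM.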
| 2 | 0 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__A = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__A = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class UpperCAmelCase (datasets.Metric ):
"""simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
'''references''': datasets.Sequence(
datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute( self , predictions , references , max_order=4 , smooth=False ):
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 369 | """simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
lowercase__: Dict = parent
lowercase__: Optional[int] = batch_size
lowercase__: List[str] = seq_length
lowercase__: Optional[int] = is_training
lowercase__: Dict = use_input_mask
lowercase__: List[Any] = use_token_type_ids
lowercase__: List[str] = use_labels
lowercase__: Union[str, Any] = vocab_size
lowercase__: str = hidden_size
lowercase__: Any = embedding_size
lowercase__: Any = num_hidden_layers
lowercase__: Any = num_attention_heads
lowercase__: List[Any] = intermediate_size
lowercase__: Dict = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: Optional[int] = max_position_embeddings
lowercase__: List[Any] = type_vocab_size
lowercase__: Tuple = type_sequence_label_size
lowercase__: Optional[int] = initializer_range
lowercase__: Dict = num_labels
lowercase__: int = num_choices
lowercase__: int = scope
    def prepare_config_and_inputs( self ):
lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: List[Any] = None
if self.use_input_mask:
lowercase__: Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: List[Any] = None
if self.use_token_type_ids:
lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__: Optional[Any] = None
lowercase__: Any = None
lowercase__: str = None
if self.use_labels:
lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: int = MobileBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: Dict = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: str = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[Any] = MobileBertForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: str = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: int = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Any = MobileBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Union[str, Any] = MobileBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = self.num_choices
lowercase__: Union[str, Any] = MobileBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
lowercase__: Optional[int] = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
): Union[str, Any] = config_and_inputs
lowercase__: Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase (ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ):
lowercase__: Tuple = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(_UpperCAmelCase )
lowercase__: Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
lowercase__: Tuple = model(_UpperCAmelCase )[0]
lowercase__: Dict = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__: List[Any] = torch.tensor(
[
[
[-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
[-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
[2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
]
] , device=_UpperCAmelCase , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowercase__: int = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowercase__: Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
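# Numeric illustration of the ratio test (values hypothetical): with TOLERANCE = 1e-3, an
# output near 1.65e5 passes as long as expected / output stays inside (0.999, 1.001), i.e.
# within roughly 165 absolute units, whereas a fixed atol=1e-3 would reject it; the bound
# therefore scales with magnitude across MobileBERT's ~1e0 to ~1e8 output range.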
| 2 | 0 |
"""simple docstring"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]
def reversible_numbers(remaining_length , remainder , digits , length ) -> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 1_0
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(1_0 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 1_0 , digits , length )
        return result
    result = 0
    for digit1 in range(1_0 ):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 1_0 , digits , length , )
    return result
def solution(max_power = 9 ) -> int:
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
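# Sanity anchor (well-known Project Euler 145 values, not computed here): there are 120
# reversible numbers below 10**3 and 608720 below 10**9, the latter being what solution()
# with the default max_power = 9 returns.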
| 370 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase (PretrainedConfig ):
"""simple docstring"""
_UpperCAmelCase :Any = "unispeech-sat"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , num_clusters=504 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
def _snake_case ( self ):
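        # Product of all convolutional strides: the overall input-to-output downsampling factor of the feature extractor.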
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 2 | 0 |
"""simple docstring"""
def solution(n = 1_0_0_0 ) -> int:
    return sum(e for e in range(3 , n ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 371 | """simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
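# Command-line utility: convert an original Stable Diffusion checkpoint into the diffusers pipeline format.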
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 2 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
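# Convert a TensorFlow Funnel Transformer checkpoint into a PyTorch model file.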
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    config = FunnelConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
__A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 350 | """simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 2 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__A : List[str] = False
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
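        # Round-trip test: generate images, save and reload the pipeline, and check both runs produce the same output.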
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=True )
        prompt = '''A painting of a squirrel eating a burger '''
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=True )
        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _snake_case ( self ):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=True )
        prompt = '''A painting of a squirrel eating a burger '''
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 351 | """simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class UpperCAmelCase (PretrainedConfig ):
"""simple docstring"""
    model_type = "codegen"
    attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ):
lowercase__: int = vocab_size
lowercase__: str = n_ctx
lowercase__: List[Any] = n_positions
lowercase__: Union[str, Any] = n_embd
lowercase__: Optional[Any] = n_layer
lowercase__: str = n_head
lowercase__: List[Any] = n_inner
lowercase__: Union[str, Any] = rotary_dim
lowercase__: Optional[Any] = activation_function
lowercase__: Union[str, Any] = resid_pdrop
lowercase__: Optional[int] = embd_pdrop
lowercase__: Optional[Any] = attn_pdrop
lowercase__: Optional[int] = layer_norm_epsilon
lowercase__: List[Any] = initializer_range
lowercase__: Tuple = use_cache
lowercase__: Any = bos_token_id
lowercase__: Any = eos_token_id
super().__init__(
bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase )
class UpperCAmelCase (OnnxConfigWithPast ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ):
# TODO: how to do that better?
lowercase__: Any = 0
@property
def _snake_case ( self ):
        common_inputs = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            common_inputs['''attention_mask'''] = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
@property
def _snake_case ( self ):
return self._config.n_layer
@property
def _snake_case ( self ):
return self._config.n_head
    def _snake_case ( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                batch , seqlen = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['''past_key_values'''] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['''attention_mask'''] = common_inputs['''attention_mask''']
        if self.use_past:
            mask_dtype = ordered_inputs['''attention_mask'''].dtype
            ordered_inputs['''attention_mask'''] = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
@property
def _snake_case ( self ):
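        # Default ONNX opset version used when exporting this model.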
return 13
| 2 | 0 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = "isbn/0140328726" ) -> str:
lowercase__: Dict = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
lowercase__: Tuple = F"""{olid} is not a valid Open Library olid"""
raise ValueError(__UpperCAmelCase )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def summarize_book(ol_book_data ) -> dict:
    desired_keys = {
        '''title''': '''Title''',
        '''publish_date''': '''Publish date''',
        '''authors''': '''Authors''',
        '''number_of_pages''': '''Number of pages:''',
        '''first_sentence''': '''First sentence''',
        '''isbn_10''': '''ISBN (10)''',
        '''isbn_13''': '''ISBN (13)''',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['''Authors'''] = [
        get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
    ]
    data['''First sentence'''] = data['''First sentence''']['''value''']
    for key, value in data.items():
        if isinstance(value , list ):
            data[key] = ''', '''.join(value )
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__A = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
__A = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print("\n".join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 352 | """simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    """simple docstring"""
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."} ,)
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        } ,)
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        } ,)
    encoder_config_name: Optional[str] = field(
        default=None ,metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
    decoder_config_name: Optional[str] = field(
        default=None ,metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def main():
    parser = HfArgumentParser((ModelArguments,) )
    (model_args,) = parser.parse_args_into_dataclasses()
    # Load pretrained model and tokenizer
    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )
    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id
    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id
    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 2 | 0 |
"""simple docstring"""
def euclidean_distance_sqr(point1 , point2 ) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort(array , column = 0 ) -> list:
    return sorted(array , key=lambda point : point[column] )
def dis_between_closest_pair(points , points_counts , min_dis = float('''inf''' ) ) -> float:
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip(points , points_counts , min_dis = float('''inf''' ) ) -> float:
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr(points_sorted_on_x , points_sorted_on_y , points_counts ) -> float:
    # base case: brute force for three or fewer points
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points(points , points_counts ) -> float:
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
__A = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 353 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class UpperCAmelCase (PretrainedConfig ):
"""simple docstring"""
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
lowercase__: Union[str, Any] = vocab_size
lowercase__: Optional[int] = n_positions
lowercase__: Optional[int] = n_embd
lowercase__: Any = n_layer
lowercase__: Any = n_head
lowercase__: int = dff
lowercase__: Dict = resid_pdrop
lowercase__: Any = embd_pdrop
lowercase__: Any = layer_norm_epsilon
lowercase__: Optional[int] = initializer_range
lowercase__: Dict = use_cache
super().__init__(**_UpperCAmelCase )
| 2 | 0 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
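# ElGamal key generation: build a (public, private) key pair and write both to text files.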
__A = 3
def primitive_root(p_val ) -> int:
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key(key_size ) -> tuple:
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name , key_size ) -> None:
    if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
        print('''\nWARNING:''' )
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(F"""{name}_pubkey.txt""" , '''w''' ) as fo:
        fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
    print(F"""Writing private key to file {name}_privkey.txt...""" )
    with open(F"""{name}_privkey.txt""" , '''w''' ) as fo:
        fo.write(F"""{private_key[0]},{private_key[1]}""" )
def main() -> None:
    print('''Making key files...''' )
    make_key_files('''elgamal''' , 2_0_4_8 )
    print('''Key files generation successful''' )
if __name__ == "__main__":
main()
| 354 | """simple docstring"""
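# Dynamic-programming tiling count: for each tile length (2, 3 and 4, one colour each), count the ways to
# place tiles of that single length in a row, then sum the three counts (a Project Euler 116-style problem).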
def solution(length = 5_0 ) -> int:
lowercase__: str = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 2 | 0 |
"""simple docstring"""
def kth_permutation(k , n ) -> list:
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
| 355 | """simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
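# A small convolutional neural network built from scratch with NumPy: one convolution layer,
# a pooling layer, and a two-layer fully connected back-propagation network.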
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.2 , _UpperCAmelCase=0.2 ):
lowercase__: int = bp_numa
lowercase__: Union[str, Any] = bp_numa
lowercase__: List[str] = bp_numa
lowercase__: str = conva_get[:2]
lowercase__: Union[str, Any] = conva_get[2]
lowercase__: Any = size_pa
lowercase__: Optional[Any] = rate_w
lowercase__: Tuple = rate_t
lowercase__: List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase__: Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__: str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__: Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1
lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1
def _snake_case ( self , _UpperCAmelCase ):
# save model dict with pickle
lowercase__: int = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_UpperCAmelCase , '''wb''' ) as f:
pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"""Model saved: {save_path}""" )
@classmethod
def _snake_case ( cls , _UpperCAmelCase ):
# read saved model
with open(_UpperCAmelCase , '''rb''' ) as f:
lowercase__: Optional[int] = pickle.load(_UpperCAmelCase ) # noqa: S301
lowercase__: Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
lowercase__: Any = model_dic.get('''size_pooling1''' )
lowercase__: int = model_dic.get('''num_bp1''' )
lowercase__: Optional[int] = model_dic.get('''num_bp2''' )
lowercase__: str = model_dic.get('''num_bp3''' )
lowercase__: Any = model_dic.get('''rate_weight''' )
lowercase__: Union[str, Any] = model_dic.get('''rate_thre''' )
# create model instance
lowercase__: str = CNN(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# modify model parameter
lowercase__: Dict = model_dic.get('''w_conv1''' )
lowercase__: Dict = model_dic.get('''wkj''' )
lowercase__: str = model_dic.get('''vji''' )
lowercase__: List[Any] = model_dic.get('''thre_conv1''' )
lowercase__: Optional[int] = model_dic.get('''thre_bp2''' )
lowercase__: Tuple = model_dic.get('''thre_bp3''' )
return conv_ins
def _snake_case ( self , _UpperCAmelCase ):
return 1 / (1 + np.exp(-1 * x ))
def _snake_case ( self , _UpperCAmelCase ):
return round(_UpperCAmelCase , 3 )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# convolution process
lowercase__: Any = convs[0]
lowercase__: Tuple = convs[1]
lowercase__: List[Any] = np.shape(_UpperCAmelCase )[0]
# get the data slice of original image data, data_focus
lowercase__: List[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
lowercase__: Tuple = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_UpperCAmelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
lowercase__: Optional[int] = []
lowercase__: Optional[int] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_UpperCAmelCase ):
lowercase__: str = []
for i_focus in range(len(_UpperCAmelCase ) ):
lowercase__: Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_UpperCAmelCase ) )
lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(
_UpperCAmelCase , _UpperCAmelCase )
data_featuremap.append(_UpperCAmelCase )
        # expanding the data slice to one dimension
lowercase__: Union[str, Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) )
lowercase__: Any = np.asarray(_UpperCAmelCase )
return focus_list, data_featuremap
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="average_pool" ):
# pooling process
lowercase__: List[Any] = len(featuremaps[0] )
lowercase__: Any = int(size_map / size_pooling )
lowercase__: List[Any] = []
for i_map in range(len(_UpperCAmelCase ) ):
lowercase__: Any = featuremaps[i_map]
lowercase__: Tuple = []
for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_UpperCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_UpperCAmelCase ) )
lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase )
featuremap_pooled.append(_UpperCAmelCase )
return featuremap_pooled
def _snake_case ( self , _UpperCAmelCase ):
# expanding three dimension data to one dimension list
lowercase__: Optional[Any] = []
for i in range(len(_UpperCAmelCase ) ):
lowercase__: Any = np.shape(data[i] )
lowercase__: List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
lowercase__: List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(_UpperCAmelCase )
lowercase__: List[str] = np.asarray(_UpperCAmelCase )
return data_expanded
def _snake_case ( self , _UpperCAmelCase ):
# expanding matrix to one dimension list
lowercase__: Union[str, Any] = np.asarray(_UpperCAmelCase )
lowercase__: List[str] = np.shape(_UpperCAmelCase )
lowercase__: List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = []
lowercase__: List[str] = 0
for i_map in range(_UpperCAmelCase ):
lowercase__: Union[str, Any] = np.ones((size_map, size_map) )
for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = pd_pool[
i_pool
]
lowercase__: List[Any] = i_pool + 1
lowercase__: str = np.multiply(
_UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_UpperCAmelCase )
return pd_all
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=bool ):
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_UpperCAmelCase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_UpperCAmelCase )) )
lowercase__: Tuple = 0
lowercase__: Tuple = []
lowercase__: Optional[int] = 10000
while rp < n_repeat and mse >= error_accuracy:
lowercase__: Tuple = 0
print(F"""-------------Learning Time {rp}--------------""" )
for p in range(len(_UpperCAmelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase__: List[Any] = np.asmatrix(datas_train[p] )
lowercase__: Optional[int] = np.asarray(datas_teach[p] )
lowercase__, lowercase__: List[str] = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: Optional[int] = self.pooling(_UpperCAmelCase , self.size_poolinga )
lowercase__: int = np.shape(_UpperCAmelCase )
lowercase__: Optional[Any] = self._expand(_UpperCAmelCase )
lowercase__: Any = data_bp_input
lowercase__: Any = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa
lowercase__: str = self.sig(_UpperCAmelCase )
lowercase__: Optional[Any] = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa
lowercase__: Dict = self.sig(_UpperCAmelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase__: str = np.multiply(
(data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
lowercase__: str = np.multiply(
np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
lowercase__: Dict = np.dot(_UpperCAmelCase , self.vji )
lowercase__: Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase__: List[str] = pd_conva_pooled.T.getA().tolist()
lowercase__: Optional[Any] = self._calculate_gradient_from_pool(
_UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase__: str = self._expand_mat(pd_conva_all[k_conv] )
lowercase__: str = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase__: List[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase__: Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase__: List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase__: List[str] = self.thre_bpa - pd_k_all * self.rate_thre
lowercase__: Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase__: Optional[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase__: str = rp + 1
lowercase__: Optional[Any] = error_count / patterns
all_mse.append(_UpperCAmelCase )
def draw_error():
lowercase__: Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_UpperCAmelCase , '''+-''' )
plt.plot(_UpperCAmelCase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_UpperCAmelCase , alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def _snake_case ( self , _UpperCAmelCase ):
# model predict
lowercase__: Union[str, Any] = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_UpperCAmelCase )) )
for p in range(len(_UpperCAmelCase ) ):
lowercase__: Union[str, Any] = np.asmatrix(datas_test[p] )
lowercase__, lowercase__: Any = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: List[str] = self.pooling(_UpperCAmelCase , self.size_poolinga )
lowercase__: str = self._expand(_UpperCAmelCase )
lowercase__: List[Any] = data_bp_input
lowercase__: List[str] = bp_outa * self.vji.T - self.thre_bpa
lowercase__: Any = self.sig(_UpperCAmelCase )
lowercase__: Optional[int] = bp_outa * self.wkj.T - self.thre_bpa
lowercase__: Any = self.sig(_UpperCAmelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase__: str = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out]
return np.asarray(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
# return the data of image after convoluting process so we can check it out
lowercase__: int = np.asmatrix(_UpperCAmelCase )
lowercase__, lowercase__: Optional[int] = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: List[Any] = self.pooling(_UpperCAmelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 2 | 0 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
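# Scrape an Amazon search results page and collect title, link, price, rating, MRP and discount into a DataFrame.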
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = "laptop" ) -> DataFrame:
lowercase__: Tuple = F"""https://www.amazon.in/laptop/s?k={product}"""
lowercase__: Any = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
lowercase__: List[Any] = BeautifulSoup(requests.get(UpperCAmelCase_ , headers=UpperCAmelCase_ ).text )
# Initialize a Pandas dataframe with the column titles
lowercase__: Union[str, Any] = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
lowercase__: List[str] = item.ha.text
lowercase__: Optional[int] = 'https://www.amazon.in/' + item.ha.a['href']
lowercase__: Optional[int] = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
lowercase__: int = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
lowercase__: List[str] = 'Not available'
try:
lowercase__: str = (
'₹'
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
lowercase__: List[Any] = ''
try:
lowercase__: int = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_0_0 )
except ValueError:
lowercase__: str = float('''nan''' )
except AttributeError:
pass
lowercase__: List[str] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowercase__: Any = ' '
lowercase__: Any = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__A = "headphones"
get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
| 356 | """simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase (TokenizerTesterMixin ,unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text
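    # Check that BPE tokenization splits words into the expected subword units.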
    def test_full_tokenizer( self ):
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 2 | 0 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
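# Evaluate a causal language model on a streamed dataset and report validation loss and perplexity.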
class ConstantLengthDataset (IterableDataset ):
"""simple docstring"""
    def __init__( self , tokenizer , dataset , seq_length=1024 , num_of_sequences=1024 , chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer , buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )['''content'''] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = tokenizer(buffer , truncation=False )['''input_ids''']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
def create_dataloader(args ) -> DataLoader:
    ds_kwargs = {'''streaming''': True}
    train_data = load_dataset(args.dataset_name , split='''train''' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , train_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate(args ) -> tuple:
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('''inf''' )
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 357 | """simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
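# "datasets-cli convert": rewrite TensorFlow Datasets scripts into HuggingFace Datasets scripts.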
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def convert_command_factory(args: Namespace ):
    return ConvertCommand(args.tfds_path , args.datasets_directory )
class ConvertCommand (BaseDatasetsCLICommand ):
"""simple docstring"""
@staticmethod
    def register_subcommand( parser ):
        train_parser = parser.add_parser(
            '''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
        train_parser.add_argument(
            '''--tfds_path''' , type=str , required=True , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
        train_parser.add_argument(
            '''--datasets_directory''' , type=str , required=True , help='''Path to the HuggingFace Datasets folder.''' )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , tfds_path , datasets_directory , *args ):
        self._logger = get_logger('''datasets-cli/converting''' )
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run( self ):
        if os.path.isdir(self._tfds_path ):
            abs_tfds_path = os.path.abspath(self._tfds_path )
        elif os.path.isfile(self._tfds_path ):
            abs_tfds_path = os.path.dirname(self._tfds_path )
        else:
            raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
        abs_datasets_path = os.path.abspath(self._datasets_directory )
        self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
        if os.path.isdir(self._tfds_path ):
            file_names = os.listdir(abs_tfds_path )
        else:
            file_names = [os.path.basename(self._tfds_path )]
        for f_name in file_names:
            self._logger.info(F"""Looking at file {f_name}""" )
            input_file = os.path.join(abs_tfds_path , f_name )
            output_file = os.path.join(abs_datasets_path , f_name )
            if not os.path.isfile(input_file ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info('''Skipping file''' )
                continue
            with open(input_file , encoding='''utf-8''' ) as f:
                lines = f.readlines()
            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line
                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = '''import datasets\n'''
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ''''''
                    continue
                elif "from absl import logging" in out_line:
                    out_line = '''from datasets import logging\n'''
                elif "getLogger" in out_line:
                    out_line = out_line.replace('''getLogger''' , '''get_logger''' )
                elif any(expression in out_line for expression in TO_HIGHLIGHT ):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e : e in out_line , TO_HIGHLIGHT ) )
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove ) + '''\n''' )
                    out_lines.append(out_line )
                    out_lines.append(HIGHLIGHT_MESSAGE_POST )
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern , replacement , out_line )
                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , out_line )
                    tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
                    out_line = '''from . import ''' + match.group(1 )
                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(F"""Error converting {out_line.strip()}""" )
                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line )
            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace('''.py''' , '''''' )
                output_dir = os.path.join(abs_datasets_path , dir_name )
                output_file = os.path.join(output_dir , f_name )
                os.makedirs(output_dir , exist_ok=True )
                self._logger.info(F"""Adding directory {output_dir}""" )
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file )
            if needs_manual_update:
                with_manual_update.append(output_file )
            with open(output_file , '''w''' , encoding='''utf-8''' ) as f:
                f.writelines(out_lines )
            self._logger.info(F"""Converted in {output_file}""" )
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file )
                dest_folder = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
                self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
                shutil.copy(utils_file , dest_folder )
            except KeyError:
                self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 2 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
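# Legacy seq2seq fine-tuning entry point: parse model/data/training arguments, then fine-tune with the Seq2Seq trainer.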
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """simple docstring"""
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None ,metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None ,metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
    freeze_encoder: bool = field(default=False ,metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False ,metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments:
    """simple docstring"""
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization" ,metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} ,)
    max_source_length: Optional[int] = field(
        default=1024 ,metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,)
    max_target_length: Optional[int] = field(
        default=128 ,metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,)
    val_max_target_length: Optional[int] = field(
        default=142 ,metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } ,)
    test_max_target_length: Optional[int] = field(
        default=142 ,metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,)
    n_train: Optional[int] = field(default=-1 ,metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1 ,metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1 ,metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None ,metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None ,metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None ,metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True ,metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} ,)
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , F"""{split}_results.json""" ) )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
lowercase__: Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__, lowercase__, lowercase__: Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__, lowercase__, lowercase__: Any = parser.parse_args_into_dataclasses()
check_output_dir(lowerCAmelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__: int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase__: Optional[int] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
assert hasattr(lowerCAmelCase__ , lowerCAmelCase__ ), F"""({config.__class__.__name__}) doesn\'t have a `{p}` attribute"""
setattr(lowerCAmelCase__ , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
lowercase__: Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase__: List[Any] = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowerCAmelCase__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase__: Union[str, Any] = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowerCAmelCase__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase__: int = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase__: List[str] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowerCAmelCase__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase__: List[Any] = SeqaSeqDataset
# Get datasets
lowercase__: Optional[int] = (
dataset_class(
lowerCAmelCase__ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
lowercase__: Tuple = (
dataset_class(
lowerCAmelCase__ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase__: List[Any] = (
dataset_class(
lowerCAmelCase__ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase__: str = (
build_compute_metrics_fn(data_args.task , lowerCAmelCase__ ) if training_args.predict_with_generate else None
)
lowercase__: List[str] = SeqaSeqTrainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , data_args=lowerCAmelCase__ , train_dataset=lowerCAmelCase__ , eval_dataset=lowerCAmelCase__ , data_collator=SeqaSeqDataCollator(
lowerCAmelCase__ , lowerCAmelCase__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , )
lowercase__: Any = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
lowercase__: Optional[int] = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase__: Any = train_result.metrics
lowercase__: Any = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , lowerCAmelCase__ , training_args.output_dir )
all_metrics.update(lowerCAmelCase__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase__: List[Any] = trainer.evaluate(metric_key_prefix='''val''' )
lowercase__: str = data_args.n_val
lowercase__: Union[str, Any] = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , lowerCAmelCase__ , training_args.output_dir )
all_metrics.update(lowerCAmelCase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
lowercase__: Tuple = trainer.predict(test_dataset=lowerCAmelCase__ , metric_key_prefix='''test''' )
lowercase__: Union[str, Any] = test_output.metrics
lowercase__: int = data_args.n_test
if trainer.is_world_process_zero():
lowercase__: List[str] = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , lowerCAmelCase__ , training_args.output_dir )
all_metrics.update(lowerCAmelCase__ )
if training_args.predict_with_generate:
lowercase__: Optional[int] = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
lowercase__: Tuple = lmap(str.strip , lowerCAmelCase__ )
write_txt_file(lowerCAmelCase__ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(lowerCAmelCase__ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Any:
main()
if __name__ == "__main__":
main()
| 358 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = "cvt"
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Dict = num_channels
lowercase__: str = patch_sizes
lowercase__: Optional[Any] = patch_stride
lowercase__: List[str] = patch_padding
lowercase__: Optional[Any] = embed_dim
lowercase__: Optional[int] = num_heads
lowercase__: Any = depth
lowercase__: str = mlp_ratio
lowercase__: Any = attention_drop_rate
lowercase__: Any = drop_rate
lowercase__: Optional[Any] = drop_path_rate
lowercase__: Dict = qkv_bias
lowercase__: Dict = cls_token
lowercase__: Any = qkv_projection_method
lowercase__: List[str] = kernel_qkv
lowercase__: Union[str, Any] = padding_kv
lowercase__: Optional[int] = stride_kv
lowercase__: int = padding_q
lowercase__: Dict = stride_q
lowercase__: Any = initializer_range
lowercase__: Union[str, Any] = layer_norm_eps
| 2 | 0 |
"""simple docstring"""
from statistics import mean
import numpy as np
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list:
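    # Highest Response Ratio Next (HRRN) scheduling: at every step, run the
    # waiting process with the largest response ratio (W + S) / S, where W is
    # its waiting time so far and S is its burst (service) time.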
lowercase__: Optional[Any] = 0
# Number of processes finished
lowercase__: Dict = 0
    # Marks whether each process has finished:
    # 0 means it has not run yet, 1 means it has completed.
lowercase__: Tuple = [0] * no_of_process
    # List to hold the computed turnaround times
lowercase__: int = [0] * no_of_process
# Sort by arrival time.
lowercase__: Any = [burst_time[i] for i in np.argsort(__UpperCAmelCase )]
lowercase__: str = [process_name[i] for i in np.argsort(__UpperCAmelCase )]
arrival_time.sort()
while no_of_process > finished_process_count:
lowercase__: List[Any] = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
lowercase__: List[Any] = arrival_time[i]
lowercase__: Optional[int] = 0
# Index showing the location of the process being performed
lowercase__: Union[str, Any] = 0
# Saves the current response ratio.
lowercase__: int = 0
for i in range(0 , __UpperCAmelCase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
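                # Response ratio = (waiting time + burst time) / burst time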
                lowercase__: str = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
if response_ratio < temp:
lowercase__: Dict = temp
lowercase__: str = i
# Calculate the turn around time
lowercase__: Any = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
lowercase__: Optional[int] = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list:
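    # Waiting time = turnaround time - burst time for each process.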
lowercase__: List[Any] = [0] * no_of_process
for i in range(0 , __UpperCAmelCase ):
lowercase__: Dict = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
__A = 5
__A = ["A", "B", "C", "D", "E"]
__A = [1, 2, 3, 4, 5]
__A = [1, 2, 3, 4, 5]
__A = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__A = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
| 359 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = "rag"
_UpperCAmelCase :List[Any] = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=" / " , _UpperCAmelCase=" // " , _UpperCAmelCase=5 , _UpperCAmelCase=300 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase="wiki_dpr" , _UpperCAmelCase="train" , _UpperCAmelCase="compressed" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(
bos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , prefix=_UpperCAmelCase , vocab_size=_UpperCAmelCase , **_UpperCAmelCase , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
lowercase__: Optional[Any] = kwargs.pop('''question_encoder''' )
lowercase__: Any = question_encoder_config.pop('''model_type''' )
lowercase__: Tuple = kwargs.pop('''generator''' )
lowercase__: Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
lowercase__: Optional[int] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: Any = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: str = reduce_loss
lowercase__: str = label_smoothing
lowercase__: Dict = exclude_bos_score
lowercase__: Any = do_marginalize
lowercase__: Optional[int] = title_sep
lowercase__: Any = doc_sep
lowercase__: Any = n_docs
lowercase__: List[Any] = max_combined_length
lowercase__: int = dataset
lowercase__: int = dataset_split
lowercase__: str = index_name
lowercase__: Dict = retrieval_vector_size
lowercase__: Dict = retrieval_batch_size
lowercase__: List[str] = passages_path
lowercase__: str = index_path
lowercase__: Optional[Any] = use_dummy_dataset
lowercase__: str = output_retrieved
lowercase__: List[str] = do_deduplication
lowercase__: List[Any] = use_cache
if self.forced_eos_token_id is None:
lowercase__: int = getattr(self.generator , '''forced_eos_token_id''' , _UpperCAmelCase )
@classmethod
def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
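        # Build a RagConfig from separate question-encoder and generator sub-configs.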
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = copy.deepcopy(self.__dict__ )
lowercase__: str = self.question_encoder.to_dict()
lowercase__: str = self.generator.to_dict()
lowercase__: str = self.__class__.model_type
return output
| 2 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
return "\n".join(
F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
| 360 | """simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: Union[str, Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: Dict = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(os.path.isfile(_UpperCAmelCase ) )
# File is cached at the same place the second time.
lowercase__: Any = cached_file(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
# Using a specific revision to test the full commit hash.
lowercase__: Dict = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''9b8c223''' )
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
lowercase__: int = cached_file('''tiny-random-bert''' , _UpperCAmelCase )
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
lowercase__: List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''aaaa''' )
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Optional[Any] = cached_file(_UpperCAmelCase , '''conf''' )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: int = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''.no_exist''' , _UpperCAmelCase , '''conf''' ) ) )
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: List[str] = cached_file(_UpperCAmelCase , '''conf''' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: Union[str, Any] = mock.Mock()
lowercase__: str = 500
lowercase__: Union[str, Any] = {}
lowercase__: List[str] = HTTPError
lowercase__: int = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_UpperCAmelCase ) as mock_head:
lowercase__: Any = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
# This check we did call the fake head request
mock_head.assert_called()
def _snake_case ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
def _snake_case ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , _UpperCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase , revision='''ahaha''' )
lowercase__: Optional[Any] = get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase__: Optional[Any] = json.loads(open(_UpperCAmelCase , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__: Any = Path(_UpperCAmelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , '''a.txt''' ) , str(_UpperCAmelCase ) )
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , '''b.txt''' ) )
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> bool:
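    # e.g. 49/98 is digit-cancelling: crossing out the shared 9 leaves 4/8 == 49/98.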
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> list[str]:
lowercase__: Union[str, Any] = []
lowercase__: Optional[Any] = 1_1
lowercase__: int = int('''1''' + '''0''' * digit_len )
for num in range(__snake_case , __snake_case ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(__snake_case , __snake_case ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
lowercase__: List[str] = 1_0
return solutions
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 2 ) -> int:
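    # Multiply den/num over every digit-cancelling fraction; for the two-digit case
    # this gives the denominator of the product of the fractions in lowest terms
    # (Project Euler problem 33).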
lowercase__: Any = 1.0
for fraction in fraction_list(__snake_case ):
lowercase__: Union[str, Any] = Fraction(__snake_case )
result *= frac.denominator / frac.numerator
return int(__snake_case )
if __name__ == "__main__":
print(solution())
| 361 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "beit"
def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Union[str, Any] = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: int = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: List[str] = initializer_range
lowercase__: Optional[int] = layer_norm_eps
lowercase__: int = image_size
lowercase__: Tuple = patch_size
lowercase__: int = num_channels
lowercase__: Optional[Any] = use_mask_token
lowercase__: List[Any] = use_absolute_position_embeddings
lowercase__: Optional[int] = use_relative_position_bias
lowercase__: Optional[int] = use_shared_relative_position_bias
lowercase__: Optional[Any] = layer_scale_init_value
lowercase__: Union[str, Any] = drop_path_rate
lowercase__: Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__: Tuple = out_indices
lowercase__: Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__: List[str] = use_auxiliary_head
lowercase__: Optional[Any] = auxiliary_loss_weight
lowercase__: str = auxiliary_channels
lowercase__: List[str] = auxiliary_num_convs
lowercase__: Tuple = auxiliary_concat_input
lowercase__: Dict = semantic_loss_ignore_index
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = version.parse("1.11" )
@property
def _snake_case ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
return 1e-4
| 2 | 0 |