Dataset columns:

| column | type | range |
|---|---|---|
| code | string | 87 to 55.2k characters |
| code_codestyle | int64 | 0 to 349 |
| style_context | string | 135 to 49.1k characters |
| style_context_codestyle | int64 | 0 to 349 |
| label | int64 | 0 to 1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_A : str =logging.get_logger(__name__)
_A : str ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_A : Dict =[
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]:
for attribute in key.split(""".""" ):
lowerCamelCase__ : Union[str, Any] = getattr(UpperCamelCase , UpperCamelCase )
if weight_type is not None:
lowerCamelCase__ : Optional[int] = getattr(UpperCamelCase , UpperCamelCase ).shape
else:
lowerCamelCase__ : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowerCamelCase__ : Optional[Any] = value
elif weight_type == "weight_g":
lowerCamelCase__ : str = value
elif weight_type == "weight_v":
lowerCamelCase__ : Any = value
elif weight_type == "bias":
lowerCamelCase__ : str = value
else:
lowerCamelCase__ : Any = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> int:
lowerCamelCase__ : int = []
lowerCamelCase__ : Optional[Any] = fairseq_model.state_dict()
lowerCamelCase__ : Dict = hf_model.feature_extractor
lowerCamelCase__ : List[str] = hf_model.adapter
for name, value in fairseq_dict.items():
lowerCamelCase__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , )
lowerCamelCase__ : Tuple = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowerCamelCase__ : Dict = True
if "*" in mapped_key:
lowerCamelCase__ : Dict = name.split(UpperCamelCase )[0].split(""".""" )[-2]
lowerCamelCase__ : Any = mapped_key.replace("""*""" , UpperCamelCase )
if "weight_g" in name:
lowerCamelCase__ : Optional[int] = """weight_g"""
elif "weight_v" in name:
lowerCamelCase__ : Any = """weight_v"""
elif "bias" in name:
lowerCamelCase__ : List[str] = """bias"""
elif "weight" in name:
lowerCamelCase__ : int = """weight"""
else:
lowerCamelCase__ : Optional[Any] = None
set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
continue
if not is_used:
unused_weights.append(UpperCamelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
lowerCamelCase__ : Any = full_name.split("""conv_layers.""" )[-1]
lowerCamelCase__ : int = name.split(""".""" )
lowerCamelCase__ : List[Any] = int(items[0] )
lowerCamelCase__ : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowerCamelCase__ : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowerCamelCase__ : List[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowerCamelCase__ : List[Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowerCamelCase__ : Dict = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]:
lowerCamelCase__ : Union[str, Any] = full_name.split("""adaptor.""" )[-1]
lowerCamelCase__ : Optional[int] = name.split(""".""" )
if items[1].isdigit():
lowerCamelCase__ : Dict = int(items[1] )
else:
lowerCamelCase__ : str = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
lowerCamelCase__ : List[Any] = value
logger.info(f'''Adapter proj layer norm bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
lowerCamelCase__ : Dict = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
lowerCamelCase__ : Tuple = value
logger.info(f'''Adapter proj layer bias was initialized from {full_name}.''' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
lowerCamelCase__ : Union[str, Any] = value
logger.info(f'''Adapter proj layer weight was initialized from {full_name}.''' )
elif isinstance(UpperCamelCase , UpperCamelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
lowerCamelCase__ : Any = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
lowerCamelCase__ : str = value
logger.info(f'''Adapter layer {layer_id} bias was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCamelCase )
def make_linear_from_emb(emb):
    # Build a bias-free linear layer whose weights are tied to an embedding matrix.
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) -> Union[str, Any]:
lowerCamelCase__ : Optional[Any] = WavaVecaConfig.from_pretrained(
UpperCamelCase , add_adapter=UpperCamelCase , adapter_stride=UpperCamelCase , adapter_kernel_size=UpperCamelCase , use_auth_token=UpperCamelCase , output_hidden_size=UpperCamelCase , )
lowerCamelCase__ : Dict = MBartConfig.from_pretrained(UpperCamelCase )
# load model
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
lowerCamelCase__ : Union[str, Any] = model[0].eval()
# load feature extractor
lowerCamelCase__ : Any = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase , use_auth_token=UpperCamelCase )
# set weights for wav2vec2 encoder
lowerCamelCase__ : int = WavaVecaModel(UpperCamelCase )
recursively_load_weights_wavaveca(model.encoder , UpperCamelCase )
# load decoder weights
lowerCamelCase__ : int = MBartForCausalLM(UpperCamelCase )
lowerCamelCase__ , lowerCamelCase__ : Dict = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase )
logger.warning(f'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(f'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowerCamelCase__ : List[str] = SpeechEncoderDecoderModel(encoder=UpperCamelCase , decoder=UpperCamelCase )
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = MBartaaTokenizer(UpperCamelCase )
tokenizer.save_pretrained(UpperCamelCase )
lowerCamelCase__ : List[str] = hf_wavavec.config.to_dict()
lowerCamelCase__ : Union[str, Any] = tokenizer.pad_token_id
lowerCamelCase__ : Dict = tokenizer.bos_token_id
lowerCamelCase__ : List[str] = tokenizer.eos_token_id
lowerCamelCase__ : Tuple = """mbart50"""
lowerCamelCase__ : int = """wav2vec2"""
lowerCamelCase__ : Any = tokenizer.eos_token_id
lowerCamelCase__ : List[Any] = 250004
lowerCamelCase__ : Dict = tokenizer.eos_token_id
lowerCamelCase__ : Tuple = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase )
hf_wavavec.save_pretrained(UpperCamelCase )
feature_extractor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
_A : Optional[Any] =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whether to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=250_004, type=int, help='''`decoder_start_token_id` of model config''')
_A : Optional[Any] =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| code_codestyle: 41 |
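The script above assembles a speech-translation `SpeechEncoderDecoderModel` from a fairseq wav2vec2 encoder and an mBART-50 decoder. A minimal, hedged sketch of reloading the exported folder for inference follows; the dump path and the audio tensor are placeholders, not part of the original script.

# Hedged sketch: reload the converted checkpoint (path and audio are assumptions).
import torch
from transformers import MBart50Tokenizer, SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor

dump_dir = "./converted_model"  # hypothetical --pytorch_dump_folder_path
model = SpeechEncoderDecoderModel.from_pretrained(dump_dir)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(dump_dir)
tokenizer = MBart50Tokenizer.from_pretrained(dump_dir)

speech = torch.randn(16_000).numpy()  # stand-in for one second of 16 kHz mono audio
inputs = feature_extractor(speech, sampling_rate=16_000, return_tensors="pt")
generated_ids = model.generate(inputs.input_values, max_length=50)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))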
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Any = src_lang
_A : int = tgt_lang
return super().prepare_seq2seq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
| style_context_codestyle: 11 | label: 0 |
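For context, a short, hedged usage sketch of the fast mBART tokenizer defined above; the checkpoint is the public en-ro model, and the printed suffix follows the language-code logic shown in the class (suffix = end-of-sentence token followed by the source language code).

# Hedged sketch: mBART source text is tokenized as "... </s> <src_lang_code>".
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
encoded = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(encoded.input_ids[0][-2:]))  # ['</s>', 'en_XX']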
"""Length of the longest increasing subsequence in O(n log n)."""
from __future__ import annotations


def ceil_index(v: list[int], left: int, right: int, key: int) -> int:
    # Smallest index in v[left..right] whose value is >= key (v is kept sorted).
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest candidate subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element of an existing candidate
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| code_codestyle: 42 |
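A quick worked example for the O(n log n) longest-increasing-subsequence routine above:

# [2, 3, 7, 8, 10, 13] is one longest increasing subsequence, so the length is 6.
assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
assert longest_increasing_subsequence_length([]) == 0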
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
lowerCAmelCase__ = '</w>'
lowerCAmelCase__ = '@@ '
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a string or tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_A : Dict = do_lower_case
with open(__lowerCamelCase , encoding="utf-8") as vocab_handle:
_A : Optional[int] = json.load(__lowerCamelCase)
_A : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_A : Optional[Any] = None
_A : Tuple = None
else:
with open(__lowerCamelCase , encoding="utf-8") as merges_handle:
_A : Optional[int] = merges_handle.read().split("\n")[:-1]
_A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges]
_A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : List[Any] = {}
@property
def _lowerCamelCase ( self) -> int:
return len(self.decoder)
def _lowerCamelCase ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A : int = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[int] = bigram
_A : int = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : str = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : str = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : List[str] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : List[Any] = get_pairs(__lowerCamelCase)
_A : Tuple = " ".join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_A : List[str] = "\n" + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_A : int = word.replace(__lowerCamelCase , "")
_A : int = word.replace(" " , __lowerCamelCase)
_A : Union[str, Any] = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : Optional[int] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return (vocab_file, merges_file)
| style_context_codestyle: 11 | label: 0 |
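A tiny illustration of the adjacent-symbol pair extraction that the BPE loop in the tokenizer above relies on:

# Adjacent-symbol pairs of "lower": these are the merge candidates ranked by bpe_ranks.
assert get_pairs("lower") == {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}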
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=30 , __lowercase=2 , __lowercase=3 , __lowercase=True , __lowercase=True , __lowercase=32 , __lowercase=2 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=10 , __lowercase=0.02 , __lowercase=3 , __lowercase=0.6 , __lowercase=None , ) -> Tuple:
__UpperCamelCase :List[str] = parent
__UpperCamelCase :List[Any] = batch_size
__UpperCamelCase :str = image_size
__UpperCamelCase :List[Any] = patch_size
__UpperCamelCase :List[str] = num_channels
__UpperCamelCase :Union[str, Any] = is_training
__UpperCamelCase :List[str] = use_labels
__UpperCamelCase :Tuple = hidden_size
__UpperCamelCase :str = num_hidden_layers
__UpperCamelCase :List[Any] = num_attention_heads
__UpperCamelCase :Optional[Any] = intermediate_size
__UpperCamelCase :List[str] = hidden_act
__UpperCamelCase :str = hidden_dropout_prob
__UpperCamelCase :List[str] = attention_probs_dropout_prob
__UpperCamelCase :Union[str, Any] = type_sequence_label_size
__UpperCamelCase :List[str] = initializer_range
__UpperCamelCase :Optional[int] = mask_ratio
__UpperCamelCase :Optional[int] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__UpperCamelCase :Optional[Any] = (image_size // patch_size) ** 2
__UpperCamelCase :Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCamelCase :Tuple = None
if self.use_labels:
__UpperCamelCase :str = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self) -> Tuple:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowercase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> Any:
__UpperCamelCase :Any = TFViTMAEModel(config=__lowercase)
__UpperCamelCase :Union[str, Any] = model(__lowercase , training=__lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> List[str]:
__UpperCamelCase :str = TFViTMAEForPreTraining(__lowercase)
__UpperCamelCase :str = model(__lowercase , training=__lowercase)
# expected sequence length = num_patches
__UpperCamelCase :List[str] = (self.image_size // self.patch_size) ** 2
__UpperCamelCase :Union[str, Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
__UpperCamelCase :List[str] = 1
__UpperCamelCase :List[str] = TFViTMAEForPreTraining(__lowercase)
__UpperCamelCase :int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__UpperCamelCase :Dict = model(__lowercase , training=__lowercase)
__UpperCamelCase :List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Optional[int] = self.prepare_config_and_inputs()
((__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) :List[str] = config_and_inputs
__UpperCamelCase :Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : str = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
a__ : Dict = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
a__ : Tuple = False
a__ : str = False
a__ : Optional[Any] = False
a__ : Union[str, Any] = False
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :List[str] = TFViTMAEModelTester(self)
__UpperCamelCase :List[str] = ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37)
def UpperCamelCase__ ( self) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''')
def UpperCamelCase__ ( self) -> str:
pass
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase , __UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase :List[Any] = model_class(__lowercase)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
__UpperCamelCase :Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , tf.keras.layers.Layer))
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase , __UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase :Tuple = model_class(__lowercase)
__UpperCamelCase :int = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase :Optional[int] = [*signature.parameters.keys()]
__UpperCamelCase :Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowercase)
def UpperCamelCase__ ( self) -> Optional[Any]:
# make the mask reproducible
np.random.seed(2)
__UpperCamelCase , __UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase :Tuple = int((config.image_size // config.patch_size) ** 2)
__UpperCamelCase :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__UpperCamelCase :str = model_class(__lowercase)
__UpperCamelCase :Optional[int] = self._prepare_for_class(__lowercase , __lowercase)
__UpperCamelCase :Dict = model(__lowercase , noise=__lowercase)
__UpperCamelCase :int = copy.deepcopy(self._prepare_for_class(__lowercase , __lowercase))
__UpperCamelCase :Union[str, Any] = model(**__lowercase , noise=__lowercase)
__UpperCamelCase :Tuple = outputs_dict[0].numpy()
__UpperCamelCase :Union[str, Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)) , 1E-6)
def UpperCamelCase__ ( self) -> Optional[int]:
# make the mask reproducible
np.random.seed(2)
__UpperCamelCase , __UpperCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase :int = int((config.image_size // config.patch_size) ** 2)
__UpperCamelCase :str = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
def prepare_numpy_arrays(__lowercase):
__UpperCamelCase :Optional[int] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__lowercase):
__UpperCamelCase :Optional[Any] = v.numpy()
else:
__UpperCamelCase :Optional[int] = np.array(__lowercase)
return inputs_np_dict
for model_class in self.all_model_classes:
__UpperCamelCase :int = model_class(__lowercase)
__UpperCamelCase :Tuple = self._prepare_for_class(__lowercase , __lowercase)
__UpperCamelCase :Any = prepare_numpy_arrays(__lowercase)
__UpperCamelCase :Any = model(__lowercase , noise=__lowercase)
__UpperCamelCase :Tuple = model(**__lowercase , noise=__lowercase)
self.assert_outputs_same(__lowercase , __lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> List[Any]:
# make masks reproducible
np.random.seed(2)
__UpperCamelCase :Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
__UpperCamelCase :Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
__UpperCamelCase :Dict = tf.constant(__lowercase)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__UpperCamelCase :Any = tf_noise
super().check_pt_tf_models(__lowercase , __lowercase , __lowercase)
def UpperCamelCase__ ( self) -> Tuple:
# make mask reproducible
np.random.seed(2)
__UpperCamelCase , __UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase :Optional[int] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(__lowercase)
if module_member_name.endswith('''MainLayer''')
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''')] == model_class.__name__[: -len('''Model''')]
for module_member in (getattr(__lowercase , __lowercase),)
if isinstance(__lowercase , __lowercase)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__lowercase , '''_keras_serializable''' , __lowercase)
}
__UpperCamelCase :Union[str, Any] = int((config.image_size // config.patch_size) ** 2)
__UpperCamelCase :List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
__UpperCamelCase :str = tf.convert_to_tensor(__lowercase)
inputs_dict.update({'''noise''': noise})
for main_layer_class in tf_main_layer_classes:
__UpperCamelCase :Optional[int] = main_layer_class(__lowercase)
__UpperCamelCase :Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
__UpperCamelCase :Dict = tf.keras.Model(__lowercase , outputs=main_layer(__lowercase))
__UpperCamelCase :str = model(__lowercase)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase :str = os.path.join(__lowercase , '''keras_model.h5''')
model.save(__lowercase)
__UpperCamelCase :List[Any] = tf.keras.models.load_model(
__lowercase , custom_objects={main_layer_class.__name__: main_layer_class})
assert isinstance(__lowercase , tf.keras.Model)
__UpperCamelCase :Optional[Any] = model(__lowercase)
self.assert_outputs_same(__lowercase , __lowercase)
@slow
def UpperCamelCase__ ( self) -> Dict:
# make mask reproducible
np.random.seed(2)
__UpperCamelCase , __UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase :Optional[Any] = int((config.image_size // config.patch_size) ** 2)
__UpperCamelCase :Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__UpperCamelCase :Optional[int] = model_class(__lowercase)
__UpperCamelCase :Union[str, Any] = self._prepare_for_class(__lowercase , __lowercase)
__UpperCamelCase :Optional[int] = model(__lowercase , noise=__lowercase)
if model_class.__name__ == "TFViTMAEModel":
__UpperCamelCase :Any = outputs.last_hidden_state.numpy()
__UpperCamelCase :Optional[Any] = 0
else:
__UpperCamelCase :List[str] = outputs.logits.numpy()
__UpperCamelCase :Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase , saved_model=__lowercase)
__UpperCamelCase :Optional[int] = model_class.from_pretrained(__lowercase)
__UpperCamelCase :List[str] = model(__lowercase , noise=__lowercase)
if model_class.__name__ == "TFViTMAEModel":
__UpperCamelCase :List[Any] = after_outputs['''last_hidden_state'''].numpy()
__UpperCamelCase :List[Any] = 0
else:
__UpperCamelCase :Any = after_outputs['''logits'''].numpy()
__UpperCamelCase :Tuple = 0
__UpperCamelCase :Any = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(__lowercase , 1E-5)
def UpperCamelCase__ ( self) -> Union[str, Any]:
# make mask reproducible
np.random.seed(2)
__UpperCamelCase , __UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase :str = int((config.image_size // config.patch_size) ** 2)
__UpperCamelCase :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__UpperCamelCase :Tuple = model_class(__lowercase)
__UpperCamelCase :Any = self._prepare_for_class(__lowercase , __lowercase)
__UpperCamelCase :Tuple = model(__lowercase , noise=__lowercase)
__UpperCamelCase :List[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__lowercase)
__UpperCamelCase :Optional[Any] = model_class.from_config(model.get_config())
# make sure it also accepts a normal config
__UpperCamelCase :Any = model_class.from_config(model.config)
__UpperCamelCase :List[Any] = new_model(__lowercase) # Build model
new_model.set_weights(model.get_weights())
__UpperCamelCase :str = new_model(__lowercase , noise=__lowercase)
self.assert_outputs_same(__lowercase , __lowercase)
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''')
def UpperCamelCase__ ( self) -> Dict:
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''')
def UpperCamelCase__ ( self) -> Any:
pass
@slow
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[Any] = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''')
self.assertIsNotNone(__lowercase)
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self) -> Optional[Any]:
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''') if is_vision_available() else None
@slow
def UpperCamelCase__ ( self) -> List[str]:
# make random mask reproducible across the PT and TF model
np.random.seed(2)
__UpperCamelCase :Optional[Any] = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''')
__UpperCamelCase :Optional[int] = self.default_image_processor
__UpperCamelCase :Optional[int] = prepare_img()
__UpperCamelCase :Optional[int] = image_processor(images=__lowercase , return_tensors='''tf''')
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__UpperCamelCase :Union[str, Any] = ViTMAEConfig()
__UpperCamelCase :Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
__UpperCamelCase :Tuple = np.random.uniform(size=(1, num_patches))
# forward pass
__UpperCamelCase :int = model(**__lowercase , noise=__lowercase)
# verify the logits
__UpperCamelCase :Optional[int] = tf.convert_to_tensor([1, 196, 768])
self.assertEqual(outputs.logits.shape , __lowercase)
__UpperCamelCase :List[Any] = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
tf.debugging.assert_near(outputs.logits[0, :3, :3] , __lowercase , atol=1E-4)
| code_codestyle: 43 |
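The sequence-length arithmetic used by the ViTMAE tester above, worked through for its default settings (image_size=30, patch_size=2, mask_ratio=0.6):

import math

num_patches = (30 // 2) ** 2                                # 225 patches
seq_length = int(math.ceil((1 - 0.6) * (num_patches + 1)))  # 91 visible tokens, incl. [CLS]
assert (num_patches, seq_length) == (225, 91)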
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    """Configuration for ViT MAE models; defaults follow facebook/vit-mae-base."""

    model_type = "vit_mae"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0,
                 attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12,
                 image_size=224, patch_size=16, num_channels=3, qkv_bias=True,
                 decoder_num_attention_heads=16, decoder_hidden_size=512,
                 decoder_num_hidden_layers=8, decoder_intermediate_size=2048,
                 mask_ratio=0.75, norm_pix_loss=False, **kwargs) -> int:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| style_context_codestyle: 11 | label: 0 |
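A hedged sketch of using the configuration above to build a ViT-MAE model; the reduced sizes are arbitrary and only meant to keep the example small.

from transformers import ViTMAEConfig, ViTMAEForPreTraining

config = ViTMAEConfig(hidden_size=192, num_hidden_layers=4, num_attention_heads=4,
                      intermediate_size=768, image_size=224, patch_size=16, mask_ratio=0.75)
model = ViTMAEForPreTraining(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the reduced model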
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_a : Optional[int] = logging.get_logger(__name__)
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = UNetaDModel
_UpperCamelCase : int = "sample"
@property
def __A ( self ):
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : Optional[int] = 3
_lowerCAmelCase : List[str] = (32, 32)
_lowerCAmelCase : str = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : Dict = torch.tensor([10] ).to(a__ )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ):
return (3, 32, 32)
@property
def __A ( self ):
return (3, 32, 32)
def __A ( self ):
_lowerCAmelCase : str = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
_lowerCAmelCase : int = self.dummy_input
return init_dict, inputs_dict
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Tuple = UNetaDModel
_UpperCamelCase : Dict = "sample"
@property
def __A ( self ):
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : int = 4
_lowerCAmelCase : Optional[int] = (32, 32)
_lowerCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : int = torch.tensor([10] ).to(a__ )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ):
return (4, 32, 32)
@property
def __A ( self ):
return (4, 32, 32)
def __A ( self ):
_lowerCAmelCase : Dict = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
_lowerCAmelCase : int = self.dummy_input
return init_dict, inputs_dict
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[str] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(a__ )
_lowerCAmelCase : Any = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ )
model.to(a__ )
_lowerCAmelCase : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def __A ( self ):
# by default model loading will use accelerate as `low_cpu_mem_usage=True`
_lowerCAmelCase , _lowerCAmelCase : List[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ )
model_accelerate.to(a__ )
model_accelerate.eval()
_lowerCAmelCase : int = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCAmelCase : Optional[int] = noise.to(a__ )
_lowerCAmelCase : Dict = torch.tensor([10] * noise.shape[0] ).to(a__ )
_lowerCAmelCase : Dict = model_accelerate(a__ , a__ )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=a__ , low_cpu_mem_usage=a__ )
model_normal_load.to(a__ )
model_normal_load.eval()
_lowerCAmelCase : Dict = model_normal_load(a__ , a__ )["""sample"""]
assert torch_all_close(a__ , a__ , rtol=1e-3 )
def __A ( self ):
_lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(a__ )
_lowerCAmelCase : List[Any] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_lowerCAmelCase : List[str] = noise.to(a__ )
_lowerCAmelCase : Any = torch.tensor([10] * noise.shape[0] ).to(a__ )
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(a__ , a__ ).sample
_lowerCAmelCase : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_lowerCAmelCase : Optional[int] = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
# fmt: on
self.assertTrue(torch_all_close(a__ , a__ , rtol=1e-3 ) )
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = UNetaDModel
_UpperCamelCase : Any = "sample"
@property
def __A ( self , a__=(32, 32) ):
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : Dict = 3
_lowerCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=a__ )
return {"sample": noise, "timestep": time_step}
@property
def __A ( self ):
return (3, 32, 32)
@property
def __A ( self ):
return (3, 32, 32)
def __A ( self ):
_lowerCAmelCase : List[Any] = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
_lowerCAmelCase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
@slow
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(a__ )
_lowerCAmelCase : str = self.dummy_input
_lowerCAmelCase : Optional[Any] = floats_tensor((4, 3) + (256, 256) ).to(a__ )
_lowerCAmelCase : Any = noise
_lowerCAmelCase : str = model(**a__ )
assert image is not None, "Make sure output is not None"
@slow
def __A ( self ):
_lowerCAmelCase : Optional[Any] = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(a__ )
_lowerCAmelCase : str = 4
_lowerCAmelCase : List[str] = 3
_lowerCAmelCase : Any = (256, 256)
_lowerCAmelCase : List[Any] = torch.ones((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : str = torch.tensor(batch_size * [1e-4] ).to(a__ )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ , a__ ).sample
_lowerCAmelCase : Optional[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowerCAmelCase : int = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
# fmt: on
self.assertTrue(torch_all_close(a__ , a__ , rtol=1e-2 ) )
def __A ( self ):
_lowerCAmelCase : int = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(a__ )
_lowerCAmelCase : Union[str, Any] = 4
_lowerCAmelCase : Tuple = 3
_lowerCAmelCase : int = (32, 32)
_lowerCAmelCase : List[str] = torch.ones((batch_size, num_channels) + sizes ).to(a__ )
_lowerCAmelCase : List[Any] = torch.tensor(batch_size * [1e-4] ).to(a__ )
with torch.no_grad():
_lowerCAmelCase : int = model(a__ , a__ ).sample
_lowerCAmelCase : Tuple = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_lowerCAmelCase : Optional[Any] = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
# fmt: on
self.assertTrue(torch_all_close(a__ , a__ , rtol=1e-2 ) )
def __A ( self ):
# not required for this model
pass
| code_codestyle: 44 |
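A hedged sketch mirroring the dummy configuration used in the UNet tests above, showing the basic forward call; the output sample keeps the input shape.

import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    attention_head_dim=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
sample = torch.randn(1, 3, 32, 32)
out = model(sample, timestep=torch.tensor([10])).sample
print(out.shape)  # torch.Size([1, 3, 32, 32])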
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| style_context_codestyle: 11 | label: 0 |
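The lazy-import module above only exposes the `SpeechEncoderDecoderConfig` / `SpeechEncoderDecoderModel` names; a hedged sketch of composing such a model from two public checkpoints, following the pattern in the transformers documentation:

from transformers import SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    "facebook/wav2vec2-base-960h", "bert-base-uncased"
)
print(type(model.encoder).__name__, type(model.decoder).__name__)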
"""simple docstring"""
import string
def lowercase ( lowerCAmelCase__ : str ) -> str:
__a = ''''''
for i in sequence:
__a = ord(lowerCAmelCase__ )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def lowercase ( lowerCAmelCase__ : str ) -> str:
__a = string.ascii_letters
__a = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowerCAmelCase__ )] if c in letters else c for c in sequence )
def lowercase ( ) -> None:
from timeit import timeit
print('''Running performance benchmarks...''' )
__a = '''from string import printable ; from __main__ import atbash, atbash_slow'''
print(f'''> atbash_slow(): {timeit('atbash_slow(printable)' , setup=lowerCAmelCase__ )} seconds''' )
print(f'''> atbash(): {timeit('atbash(printable)' , setup=lowerCAmelCase__ )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| code_codestyle: 45 |
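Quick checks for the Atbash routines above; the cipher maps A to Z, B to Y, and so on, leaving non-letters unchanged:

assert atbash("ABCDEFGH") == "ZYXWVUTS"
assert atbash("123GGjj") == "123TTqq"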
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
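# (Added illustration, not part of the original tool.) The cartesian-product expansion described
# above is essentially itertools.product over the variation dimensions; a minimal, self-contained
# sketch using the example values from the comment (the helper name is ours, not the script's):
def _illustrate_variation_expansion():
    import itertools
    dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
    return [x.strip() for x in map(" ".join, itertools.product(*dims))]
    # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
    #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']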
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase__ = float('nan')
class Tee:
    '''Tees stdout into a log file while still printing to the console.'''
    def __init__(self , filename):
        self.stdout = sys.stdout
        self.file = open(filename , "a")
    def __getattr__(self , attr):
        return getattr(self.stdout , attr)
    def write(self , msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r" , "" , msg , 0 , re.M))
def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ):
_A : Tuple = []
# deal with critical env vars
_A : Dict = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
_A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ )
if val is not None:
cmd.append(f"{key}={val}" )
# python executable (not always needed if the script is executable)
_A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(UpperCamelCase__ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_A : Tuple = []
_A : Dict = ""
while len(UpperCamelCase__ ) > 0:
current_line += f"{cmd.pop(0 )} "
if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(UpperCamelCase__ )
_A : Union[str, Any] = ""
return "\\\n".join(UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ):
# unwrap multi-line input
_A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
_A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += f" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
_A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
_A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
_A : Tuple = variation.replace(" " , "-" )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f:
f.write(result.stdout )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f:
_A : List[str] = json.load(UpperCamelCase__ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ):
_A : Union[str, Any] = []
_A : Optional[int] = []
_A : Any = f"{id}: {variation:<{longest_variation_len}}"
_A : Dict = f"{preamble}: "
_A : Union[str, Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ):
_A : Optional[Any] = process_run_single(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : Optional[Any] = single_run_metrics[target_metric_key]
if not math.isnan(UpperCamelCase__ ):
metrics.append(UpperCamelCase__ )
results.append(UpperCamelCase__ )
outcome += "✓"
else:
outcome += "✘"
_A : str = f"\33[2K\r{outcome}"
if len(UpperCamelCase__ ) > 0:
_A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_A : Any = round(mean_metrics[target_metric_key] , 2 )
_A : Tuple = f"{outcome} {mean_target}"
if len(UpperCamelCase__ ) > 1:
results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}"
print(UpperCamelCase__ )
_A : Optional[int] = variation
return mean_metrics
else:
print(UpperCamelCase__ )
return {variation_key: variation, target_metric_key: nan}
def _UpperCAmelCase ():
_A : int = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
_A : Any = pd.DataFrame(UpperCamelCase__ )
_A : List[str] = "variation"
_A : List[Any] = "diff_%"
_A : int = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(UpperCamelCase__ ):
# as a fallback, use the minimal value as the sentinel
_A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(UpperCamelCase__ ):
_A : Optional[Any] = df.apply(
        lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
_A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols
# capitalize
_A : Tuple = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace("_" , "<br>" ) , axis="columns" )
    df_console = df.rename(lambda c : c.replace("_" , "\n" ) , axis="columns" )
_A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
print("\n\n".join(UpperCamelCase__ ) )
def _UpperCAmelCase ():
_A : int = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , )
parser.add_argument(
"--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
_A : int = parser.parse_args()
_A : Union[str, Any] = args.output_dir
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
_A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ )
# split each dimension into its --foo variations
_A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) )
_A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations )
# split wanted keys
_A : str = args.report_metric_keys.split()
# capture prints into a log file for convenience
_A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
print(f"and this script's output is also piped into {report_fn}" )
_A : Tuple = Tee(UpperCamelCase__ )
print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" )
print(f"Base command: {' '.join(UpperCamelCase__ )}" )
_A : str = "variation"
_A : Union[str, Any] = []
for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ):
_A : Dict = base_cmd + variation.split()
results.append(
process_run(
id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) )
process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 11 | 0 |
"""simple docstring"""
from manim import *
class lowercase ( _UpperCAmelCase ):
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase = [mem.copy() for i in range(6 )]
lowerCAmelCase = [mem.copy() for i in range(6 )]
lowerCAmelCase = VGroup(*lowercase ).arrange(lowercase , buff=0 )
lowerCAmelCase = VGroup(*lowercase ).arrange(lowercase , buff=0 )
lowerCAmelCase = VGroup(lowercase , lowercase ).arrange(lowercase , buff=0 )
lowerCAmelCase = Text("""CPU""" , font_size=24 )
lowerCAmelCase = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase )
lowerCAmelCase = [mem.copy() for i in range(1 )]
lowerCAmelCase = VGroup(*lowercase ).arrange(lowercase , buff=0 )
lowerCAmelCase = Text("""GPU""" , font_size=24 )
lowerCAmelCase = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase )
gpu.align_to(lowercase , lowercase )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowercase )
lowerCAmelCase = [mem.copy() for i in range(6 )]
lowerCAmelCase = VGroup(*lowercase ).arrange(lowercase , buff=0 )
lowerCAmelCase = Text("""Model""" , font_size=24 )
lowerCAmelCase = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowercase , run_time=1 ) , Create(lowercase , run_time=1 ) , Create(lowercase , run_time=1 ) , )
lowerCAmelCase = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase , run_time=2.5 ) , Write(lowercase ) , Write(lowercase ) )
self.add(lowercase )
lowerCAmelCase = []
lowerCAmelCase = []
lowerCAmelCase = []
for i, rect in enumerate(lowercase ):
lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowercase , opacity=0.7 )
cpu_target.move_to(lowercase )
cpu_target.generate_target()
lowerCAmelCase = 0.46 / 4
lowerCAmelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowercase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowercase , buff=0.0 )
cpu_targs.append(lowercase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowercase ) )
second_animations.append(MoveToTarget(lowercase , run_time=1.5 ) )
self.play(*lowercase )
self.play(*lowercase )
self.wait()
| 46 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__SCREAMING_SNAKE_CASE = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Overwrite the cached training and evaluation sets"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "The number of processes to use for the preprocessing."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowerCamelCase ( self) -> int:
if self.train_file is not None:
_A : Optional[int] = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_A : Dict = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
    def __call__(self , features):
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels , dtype=torch.int64)
        return batch
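# (Added sketch, not part of the original script; the sizes are hypothetical.) It mirrors the
# flatten / un-flatten step of the collator above: 2 examples x 4 choices are padded together as
# 8 sequences, then viewed back to (batch_size, num_choices, seq_len) for the multiple-choice model.
def _collator_shape_sketch():
    flat = torch.zeros(2 * 4, 16, dtype=torch.int64)  # what tokenizer.pad() returns for 8 sequences of length 16
    unflat = flat.view(2, 4, -1)  # what AutoModelForMultipleChoice consumes
    return unflat.shape  # torch.Size([2, 4, 16])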
def _UpperCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A : int = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_A : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_A : List[str] = {}
if data_args.train_file is not None:
_A : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
_A : Tuple = data_args.validation_file
_A : Union[str, Any] = data_args.train_file.split("." )[-1]
_A : List[str] = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_A : Union[str, Any] = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_A : str = [f"ending{i}" for i in range(4 )]
_A : Union[str, Any] = "sent1"
_A : str = "sent2"
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
            max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers )
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding="max_length" if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
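    # (Added note.) Each SWAG example therefore becomes 4 (context, ending) pairs: the context is
    # repeated 4 times, paired with "<question header> <ending_i>", flattened so the tokenizer sees
    # batch_size * 4 sequences, and regrouped into chunks of 4 by the return statement above.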
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_A : Optional[int] = raw_datasets["train"]
if data_args.max_train_samples is not None:
_A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
_A : Any = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_A : Optional[int] = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_A : Optional[int] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
_A : Dict = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_A : List[str] = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_A : str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
_A : List[str] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
_A : Any = None
if training_args.resume_from_checkpoint is not None:
_A : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A : int = last_checkpoint
_A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_A : Optional[int] = train_result.metrics
_A : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
_A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("train" , UpperCamelCase__ )
trainer.save_metrics("train" , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_A : List[Any] = trainer.evaluate()
_A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
_A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("eval" , UpperCamelCase__ )
trainer.save_metrics("eval" , UpperCamelCase__ )
_A : Tuple = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 11 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase : Any = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
lowerCamelCase : Any = {
"facebook/bart-base": 1_0_2_4,
"facebook/bart-large": 1_0_2_4,
"facebook/bart-large-mnli": 1_0_2_4,
"facebook/bart-large-cnn": 1_0_2_4,
"facebook/bart-large-xsum": 1_0_2_4,
"yjernite/bart_eli5": 1_0_2_4,
}
class A__ ( A__ ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ['input_ids', 'attention_mask']
A__ = BartTokenizer
def __init__( self : List[Any] , _a : Any=None , _a : str=None , _a : Any=None , _a : Dict="replace" , _a : List[Any]="<s>" , _a : List[Any]="</s>" , _a : Optional[Any]="</s>" , _a : str="<s>" , _a : Optional[Any]="<unk>" , _a : Any="<pad>" , _a : List[str]="<mask>" , _a : Union[str, Any]=False , _a : List[Any]=True , **_a : Tuple , ) -> List[str]:
'''simple docstring'''
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('type' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_SCREAMING_SNAKE_CASE ='post_processor'
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['sep'] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['cls'] )
_SCREAMING_SNAKE_CASE =False
if state.get('add_prefix_space' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('trim_offsets' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('type' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
def A ( self : Tuple ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def A ( self : List[str] , _a : Union[str, Any] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def A ( self : Dict , *_a : int , **_a : str ) -> BatchEncoding:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =kwargs.get('is_split_into_words' , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*_a , **_a )
def A ( self : Optional[int] , *_a : Any , **_a : str ) -> BatchEncoding:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =kwargs.get('is_split_into_words' , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'to use it with pretokenized inputs.' )
return super()._encode_plus(*_a , **_a )
def A ( self : List[str] , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def A ( self : Tuple , _a : str , _a : int=None ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
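    # (Added note.) For a single sequence this builds <s> A </s>; for a pair it builds
    # <s> A </s> </s> B </s>, i.e. the BART/RoBERTa convention with a doubled separator.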
def A ( self : Dict , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 47 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self) -> str:
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , )
assert hasattr(self , "env")
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
_A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
_A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , )
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)])
def _lowerCamelCase ( self , __lowerCamelCase) -> Any:
# create estimator
_A : Union[str, Any] = self.create_estimator(__lowerCamelCase)
# run training
estimator.fit()
# result dataframe
_A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
_A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_A : Optional[Any] = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
| 11 | 0 |
import requests
SCREAMING_SNAKE_CASE__ : List[Any] = '' # <-- Put your OpenWeatherMap appid here!
SCREAMING_SNAKE_CASE__ : List[Any] = 'https://api.openweathermap.org/data/2.5/'
def A ( _SCREAMING_SNAKE_CASE = "Chicago" ,_SCREAMING_SNAKE_CASE = APPID ) -> dict:
return requests.get(URL_BASE + "weather" ,params=locals() ).json()
def A ( _SCREAMING_SNAKE_CASE = "Kolkata, India" ,_SCREAMING_SNAKE_CASE = APPID ) -> dict:
return requests.get(URL_BASE + "forecast" ,params=locals() ).json()
def A ( _SCREAMING_SNAKE_CASE = 55.68 ,_SCREAMING_SNAKE_CASE = 12.57 ,_SCREAMING_SNAKE_CASE = APPID ) -> dict:
return requests.get(URL_BASE + "onecall" ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
SCREAMING_SNAKE_CASE__ : Dict = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 48 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
__SCREAMING_SNAKE_CASE = "OwlViTImageProcessor"
__SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]:
_A : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
_A : List[Any] = kwargs.pop("feature_extractor")
_A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(__lowerCamelCase , __lowerCamelCase)
def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none.")
if text is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)):
_A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)]
elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase):
_A : Optional[Any] = []
# Maximum number of queries across batch
_A : str = max([len(__lowerCamelCase) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCamelCase) != max_num_queries:
_A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase))
_A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
encodings.append(__lowerCamelCase)
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
if return_tensors == "np":
_A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
_A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0)
_A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0)
else:
raise ValueError("Target return tensor type could not be returned")
_A : Optional[Any] = BatchEncoding()
_A : Tuple = input_ids
_A : Dict = attention_mask
if query_images is not None:
_A : Optional[Any] = BatchEncoding()
_A : List[str] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values
_A : Union[str, Any] = query_pixel_values
if images is not None:
_A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
if text is not None and images is not None:
_A : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_A : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase)
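    # (Added note.) When samples in a batch carry different numbers of text queries, __call__ pads
    # each sample's query list with " " up to the longest one, so the stacked input_ids and
    # attention_mask tensors share a single (num_queries, seq_len) shape per image.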
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str:
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]:
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@property
def _lowerCamelCase ( self) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
| 11 | 0 |
__snake_case :Union[str, Any] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__snake_case :Union[str, Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__snake_case :Optional[int] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 49 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
_A : Optional[int] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase)
def _lowerCamelCase ( self) -> int:
_A : Optional[int] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Dict:
_A : int = "sgugger/tiny-distilbert-classification"
_A : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = "sshleifer/tiny-gpt2"
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def _lowerCamelCase ( self) -> int:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Any = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Any:
_A : Union[str, Any] = "sshleifer/tiny-gpt2"
_A : Any = AutoConfig.from_pretrained(__lowerCamelCase)
# set architectures equal to `None`
_A : Dict = None
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : List[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> str:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : Tuple = "sshleifer/tinier_bart"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> str:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> int:
_A : int = "sshleifer/tinier_bart"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Dict:
_A : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> int:
_A : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = PyTorchBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
| 11 | 0 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    # difference between the predicted and the actual output for a given example
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)
def _hypothesis_value(data_input_tuple):
    # linear hypothesis h(x) = theta_0 + theta_1 * x_1 + ... for one input tuple
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    # actual output value for the requested example
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    # hypothesis value for the requested example
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    # sum of error terms, weighted by the feature at `index` unless index == -1 (bias term)
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
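# Worked example (illustrative, not executed): the initial hypothesis value for the first
# training example can be checked by hand with the values defined above:
#     _hypothesis_value((5, 2, 3)) == 2 + 4 * 5 + 1 * 2 + 5 * 3 == 39
#     _error(0) == 39 - train_data[0][1] == 39 - 15 == 24
# Each gradient-descent step then moves every parameter against its cost derivative
# until numpy.allclose reports convergence of parameter_vector.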
| 50 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = NllbTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple:
        # Mask token behaves like a normal word, i.e. includes the space before it
_A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
_A : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , )
_A : int = vocab_file
_A : Optional[Any] = False if not self.vocab_file else True
_A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "eng_Latn"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : Tuple = [self.sep_token_id]
_A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : List[Any] = src_lang
_A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Tuple = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , __lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Tuple = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> str:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : List[str] = []
_A : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_A : Tuple = [self.cur_lang_code]
_A : Optional[Any] = [self.eos_token_id]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : Tuple = []
_A : Any = [self.eos_token_id, self.cur_lang_code]
else:
_A : Union[str, Any] = [self.cur_lang_code]
_A : str = [self.eos_token_id]
_A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : Dict = self.convert_ids_to_tokens(self.suffix_tokens)
_A : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
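# Illustrative usage sketch (not part of this module; assumes the released
# "facebook/nllb-200-distilled-600M" checkpoint is reachable on the Hugging Face Hub):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     model_inputs = tokenizer("Hello world", return_tensors="pt")
#
# With the default (non-legacy) behaviour defined above, the encoded source sequence is
# [eng_Latn code] + tokens + [</s>]; legacy_behaviour=True moves the language code after </s>.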
| 11 | 0 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
snake_case_ : str = logging.getLogger()
def A (__A : Path , __A : list ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = '''\n'''.join(__A )
Path(__A ).open('''w''' ).writelines(__A )
snake_case_ : Union[str, Any] = "patrickvonplaten/t5-tiny-random"
snake_case_ : int = "sshleifer/bart-tiny-random"
snake_case_ : List[str] = "sshleifer/tiny-mbart"
snake_case_ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class __snake_case ( a ):
def lowerCamelCase ( self : List[str] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = Path(self.get_auto_remove_tmp_dir()) / '''utest_input.source'''
UpperCAmelCase_ = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
UpperCAmelCase_ = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.''']
_dump_articles(_snake_case , _snake_case)
UpperCAmelCase_ = str(Path(self.get_auto_remove_tmp_dir()) / '''scores.json''')
UpperCAmelCase_ = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
UpperCAmelCase_ = F"""
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
""".split()
with patch.object(_snake_case , '''argv''' , _snake_case):
run_generate()
assert Path(_snake_case).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
self.run_eval_tester(_snake_case)
@parameterized.expand([BART_TINY, MBART_TINY])
@slow
def lowerCamelCase ( self : List[Any] , _snake_case : Tuple):
"""simple docstring"""
self.run_eval_tester(_snake_case)
@parameterized.expand([T5_TINY, MBART_TINY])
@slow
def lowerCamelCase ( self : str , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = Path(self.get_auto_remove_tmp_dir()) / '''utest_input.source'''
UpperCAmelCase_ = input_file_name.parent / '''utest_output.txt'''
assert not output_file_name.exists()
UpperCAmelCase_ = {
'''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''],
'''de''': [
'''Maschinelles Lernen ist großartig, oder?''',
'''Ich esse gerne Bananen''',
'''Morgen ist wieder ein toller Tag!''',
],
}
UpperCAmelCase_ = Path(self.get_auto_remove_tmp_dir())
UpperCAmelCase_ = str(tmp_dir / '''scores.json''')
UpperCAmelCase_ = str(tmp_dir / '''val.target''')
_dump_articles(_snake_case , text['''en'''])
_dump_articles(_snake_case , text['''de'''])
UpperCAmelCase_ = '''translation_en_to_de''' if model == T5_TINY else '''summarization'''
UpperCAmelCase_ = F"""
run_eval_search.py
{model}
{str(_snake_case)}
{str(_snake_case)}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
""".split()
testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''])
with patch.object(_snake_case , '''argv''' , _snake_case):
with CaptureStdout() as cs:
run_search()
UpperCAmelCase_ = [''' num_beams | length_penalty''', model, '''Best score args''']
UpperCAmelCase_ = ['''Info''']
if "translation" in task:
expected_strings.append('''bleu''')
else:
expected_strings.extend(_snake_case)
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(_snake_case).exists()
os.remove(Path(_snake_case))
| 51 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ = logging.get_logger(__name__)
_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
    # register a Formatter class under a main type name and optional aliases
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type
def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None):
    # register the error to raise when an optional backend's formatter is requested but unavailable
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    # resolve an alias such as "np" or "pt" to its main format type name
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type
def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    # factory returning the Formatter instance registered for `format_type`
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
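# Illustrative usage sketch (assumes the registrations above have already run at import time):
#
#     formatter = get_formatter("np")      # alias resolves to "numpy" -> NumpyFormatter()
#     formatter = get_formatter("torch")   # TorchFormatter, or raises _torch_error without PyTorch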
| 11 | 0 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A__ :
def __init__( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : int = question_encoder
UpperCamelCase : int = generator
UpperCamelCase : Optional[int] = self.question_encoder
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
if os.path.isfile(A_ ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(A_ , exist_ok=A_ )
UpperCamelCase : List[Any] = os.path.join(A_ , "question_encoder_tokenizer" )
UpperCamelCase : Union[str, Any] = os.path.join(A_ , "generator_tokenizer" )
self.question_encoder.save_pretrained(A_ )
self.generator.save_pretrained(A_ )
@classmethod
def __UpperCamelCase( cls , A_ , **A_ ):
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
UpperCamelCase : str = kwargs.pop("config" , A_ )
if config is None:
UpperCamelCase : List[Any] = RagConfig.from_pretrained(A_ )
UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(
A_ , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
UpperCamelCase : str = AutoTokenizer.from_pretrained(
A_ , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=A_ , generator=A_ )
def __call__( self , *A_ , **A_ ):
'''simple docstring'''
return self.current_tokenizer(*A_ , **A_ )
def __UpperCamelCase( self , *A_ , **A_ ):
'''simple docstring'''
return self.generator.batch_decode(*A_ , **A_ )
def __UpperCamelCase( self , *A_ , **A_ ):
'''simple docstring'''
return self.generator.decode(*A_ , **A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = self.question_encoder
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.generator
def __UpperCamelCase( self , A_ , A_ = None , A_ = None , A_ = None , A_ = "longest" , A_ = None , A_ = True , **A_ , ):
'''simple docstring'''
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , A_ , )
if max_length is None:
UpperCamelCase : str = self.current_tokenizer.model_max_length
UpperCamelCase : Dict = self(
A_ , add_special_tokens=A_ , return_tensors=A_ , max_length=A_ , padding=A_ , truncation=A_ , **A_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCamelCase : Optional[int] = self.current_tokenizer.model_max_length
UpperCamelCase : str = self(
text_target=A_ , add_special_tokens=A_ , return_tensors=A_ , padding=A_ , max_length=A_ , truncation=A_ , **A_ , )
UpperCamelCase : List[str] = labels["input_ids"]
return model_inputs
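# Illustrative usage sketch (not part of this module; assumes the "facebook/rag-token-nq"
# checkpoint, which ships both a question-encoder and a generator tokenizer):
#
#     from transformers import RagTokenizer
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#
# __call__ tokenizes with the question-encoder tokenizer (the default current_tokenizer
# above), while batch_decode/decode delegate to the generator tokenizer.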
| 52 |
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
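# A minimal sanity check, added as an illustrative sketch (not called by default): for
# f(x) = x**2 on [0, 1] with 10 steps the estimate is roughly 0.335 versus the exact
# value 1/3, and the error shrinks roughly as O(h**2) when `steps` is increased.
def _sanity_check():
    approx = method_1([0.0, 1.0], 10.0)
    exact = 1.0 / 3.0
    print(f"approx = {approx}, exact = {exact}, abs error = {abs(approx - exact)}")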
if __name__ == "__main__":
main()
| 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
a__ : Optional[int] ={
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : str =[
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
a__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 53 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ("foo.json",)])
def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]:
_A : str = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
_A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0)
self.assertEqual(loaded_config.max_length , 2_0)
self.assertEqual(loaded_config.max_time , __lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Optional[int] = AutoConfig.from_pretrained("gpt2")
_A : int = GenerationConfig.from_model_config(__lowerCamelCase)
_A : List[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Optional[Any] = GenerationConfig()
_A : List[Any] = {
"max_new_tokens": 1_0_2_4,
"foo": "bar",
}
_A : List[str] = copy.deepcopy(__lowerCamelCase)
_A : int = generation_config.update(**__lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"foo": "bar"})
def _lowerCamelCase ( self) -> Any:
_A : int = GenerationConfig()
_A : int = "bar"
with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase)
_A : Any = GenerationConfig.from_pretrained(__lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar")
_A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase)
assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config
def _lowerCamelCase ( self) -> List[str]:
_A : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , __lowerCamelCase)
self.assertEqual(default_config.num_beams , 1)
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , __lowerCamelCase)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase)
_A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls) -> Optional[int]:
_A : Dict = TOKEN
HfFolder.save_token(__lowerCamelCase)
@classmethod
def _lowerCamelCase ( cls) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org")
except HTTPError:
pass
def _lowerCamelCase ( self) -> Any:
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token)
_A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Union[str, Any] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token)
_A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
| 11 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ : int = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[Any] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[int] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
a__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 54 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str:
_A : Optional[int] = bp_numa
_A : Dict = bp_numa
_A : Tuple = bp_numa
_A : List[str] = conva_get[:2]
_A : Tuple = conva_get[2]
_A : Optional[int] = size_pa
_A : Optional[Any] = rate_w
_A : Optional[Any] = rate_t
_A : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Any = -2 * np.random.rand(self.conva[1]) + 1
_A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
_A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# save model dict with pickle
_A : Dict = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb") as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase)
print(F"Model saved: {save_path}")
@classmethod
def _lowerCamelCase ( cls , __lowerCamelCase) -> Any:
# read saved model
with open(__lowerCamelCase , "rb") as f:
_A : Any = pickle.load(__lowerCamelCase) # noqa: S301
_A : Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
_A : str = model_dic.get("size_pooling1")
_A : List[str] = model_dic.get("num_bp1")
_A : Union[str, Any] = model_dic.get("num_bp2")
_A : List[Any] = model_dic.get("num_bp3")
_A : Dict = model_dic.get("rate_weight")
_A : List[Any] = model_dic.get("rate_thre")
# create model instance
_A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# modify model parameter
_A : List[Any] = model_dic.get("w_conv1")
_A : Union[str, Any] = model_dic.get("wkj")
_A : str = model_dic.get("vji")
_A : List[str] = model_dic.get("thre_conv1")
_A : Optional[Any] = model_dic.get("thre_bp2")
_A : Dict = model_dic.get("thre_bp3")
return conv_ins
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
return 1 / (1 + np.exp(-1 * x))
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
return round(__lowerCamelCase , 3)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]:
# convolution process
_A : Tuple = convs[0]
_A : Union[str, Any] = convs[1]
_A : List[Any] = np.shape(__lowerCamelCase)[0]
# get the data slice of original image data, data_focus
_A : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
_A : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase)
        # calculate the feature map of every single kernel, and save it as a list of matrices
_A : Optional[Any] = []
_A : Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__lowerCamelCase):
_A : Optional[int] = []
for i_focus in range(len(__lowerCamelCase)):
_A : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase))
_A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape(
__lowerCamelCase , __lowerCamelCase)
data_featuremap.append(__lowerCamelCase)
        # expand the data slice to one dimension
_A : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase))
_A : Dict = np.asarray(__lowerCamelCase)
return focus_list, data_featuremap
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict:
# pooling process
_A : Optional[Any] = len(featuremaps[0])
_A : str = int(size_map / size_pooling)
_A : Optional[int] = []
for i_map in range(len(__lowerCamelCase)):
_A : int = featuremaps[i_map]
_A : Optional[int] = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase))
_A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase)
featuremap_pooled.append(__lowerCamelCase)
return featuremap_pooled
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
        # expand three-dimensional data into a one-dimensional list
_A : Tuple = []
for i in range(len(__lowerCamelCase)):
_A : Union[str, Any] = np.shape(data[i])
_A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1])
_A : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase)
_A : Optional[Any] = np.asarray(__lowerCamelCase)
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
        # expand a matrix into a one-dimensional list
_A : List[Any] = np.asarray(__lowerCamelCase)
_A : Union[str, Any] = np.shape(__lowerCamelCase)
_A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Dict = []
_A : Any = 0
for i_map in range(__lowerCamelCase):
_A : Union[str, Any] = np.ones((size_map, size_map))
for i in range(0 , __lowerCamelCase , __lowerCamelCase):
for j in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : List[Any] = pd_pool[
i_pool
]
_A : Tuple = i_pool + 1
_A : Optional[Any] = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(__lowerCamelCase)
return pd_all
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]:
        # model training
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase)))
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase)))
_A : Tuple = 0
_A : Dict = []
_A : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_A : Union[str, Any] = 0
print(F"-------------Learning Time {rp}--------------")
for p in range(len(__lowerCamelCase)):
# print('------------Learning Image: %d--------------'%p)
_A : str = np.asmatrix(datas_train[p])
_A : Union[str, Any] = np.asarray(datas_teach[p])
_A , _A : Any = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = np.shape(__lowerCamelCase)
_A : List[str] = self._expand(__lowerCamelCase)
_A : Tuple = data_bp_input
_A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa
_A : List[Any] = self.sig(__lowerCamelCase)
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa
_A : List[str] = self.sig(__lowerCamelCase)
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
_A : int = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Optional[Any] = np.multiply(
np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji)
_A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
_A : Dict = pd_conva_pooled.T.getA().tolist()
_A : Optional[Any] = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_A : int = self._expand_mat(pd_conva_all[k_conv])
_A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_A : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
_A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
_A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_A : Optional[int] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_A : Any = rp + 1
_A : Dict = error_count / patterns
all_mse.append(__lowerCamelCase)
def draw_error():
_A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__lowerCamelCase , "+-")
plt.plot(__lowerCamelCase , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(__lowerCamelCase , alpha=0.5)
plt.show()
        print("------------------Training Completed---------------------")
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
# model predict
_A : Union[str, Any] = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase)))
for p in range(len(__lowerCamelCase)):
_A : int = np.asmatrix(datas_test[p])
_A , _A : List[Any] = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : str = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = self._expand(__lowerCamelCase)
_A : List[Any] = data_bp_input
_A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
_A : int = self.sig(__lowerCamelCase)
_A : int = bp_outa * self.wkj.T - self.thre_bpa
_A : Optional[int] = self.sig(__lowerCamelCase)
produce_out.extend(bp_outa.getA().tolist())
_A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out]
return np.asarray(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
        # return the image data after the convolution process so we can inspect it
_A : Optional[int] = np.asmatrix(__lowerCamelCase)
_A , _A : Tuple = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 11 | 0 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __snake_case ( UpperCAmelCase_ : List[str] ):
return getitem, k
def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ):
return setitem, k, v
def __snake_case ( UpperCAmelCase_ : Optional[Any] ):
return delitem, k
def __snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] ):
try:
return fun(UpperCAmelCase_ , *UpperCAmelCase_ ), None
except Exception as e:
return None, e
a_ : List[str] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
a_ : int = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
a_ : Tuple = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
a_ : Optional[Any] = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
a_ : int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
a_ : List[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def __snake_case ( UpperCAmelCase_ : int ):
lowerCamelCase_ = HashMap(initial_block_size=4 )
lowerCamelCase_ = {}
for _, (fun, *args) in enumerate(UpperCAmelCase_ ):
lowerCamelCase_ ,lowerCamelCase_ = _run_operation(UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ )
lowerCamelCase_ ,lowerCamelCase_ = _run_operation(UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ )
assert my_res == py_res
assert str(UpperCAmelCase_ ) == str(UpperCAmelCase_ )
assert set(UpperCAmelCase_ ) == set(UpperCAmelCase_ )
assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ )
assert set(my.items() ) == set(py.items() )
def __snake_case ( ):
def is_public(UpperCAmelCase_ : str ) -> bool:
return not name.startswith("_" )
lowerCamelCase_ = {name for name in dir({} ) if is_public(UpperCAmelCase_ )}
lowerCamelCase_ = {name for name in dir(HashMap() ) if is_public(UpperCAmelCase_ )}
assert dict_public_names > hash_public_names
| 55 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    # return True if the regexes in `qs` match some window of the key tuple `ks`
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
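# Illustrative usage sketch (assumes a GPT-2-style Flax parameter pytree whose nested key
# paths match the rules above, e.g. a plain dict obtained via flax.core.unfreeze(model.params)):
#
#     param_partitions = set_partitions(params)
#     # every leaf is now a PartitionSpec such as P("mp", None), or None for parameters
#     # that are replicated; "mp" names the model-parallel mesh axis.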
| 11 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = None, ) -> str:
'''simple docstring'''
snake_case_ = {}
if train_file is not None:
snake_case_ = [train_file]
if eval_file is not None:
snake_case_ = [eval_file]
if test_file is not None:
snake_case_ = [test_file]
snake_case_ = datasets.load_dataset('''csv''', data_files=__UpperCAmelCase )
snake_case_ = list(ds[list(files.keys() )[0]].features.keys() )
snake_case_ = features_name.pop(__UpperCAmelCase )
snake_case_ = list(set(ds[list(files.keys() )[0]][label_name] ) )
snake_case_ = {label: i for i, label in enumerate(__UpperCAmelCase )}
snake_case_ = tokenizer.model_input_names
snake_case_ = {}
if len(__UpperCAmelCase ) == 1:
for k in files.keys():
snake_case_ = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]], truncation=__UpperCAmelCase, max_length=__UpperCAmelCase, padding='''max_length''' ), batched=__UpperCAmelCase, )
elif len(__UpperCAmelCase ) == 2:
for k in files.keys():
snake_case_ = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]), truncation=__UpperCAmelCase, max_length=__UpperCAmelCase, padding='''max_length''', ), batched=__UpperCAmelCase, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
snake_case_ = {k: v for k, v in ex.items() if k in input_names}
snake_case_ = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
snake_case_ = {k: v for k, v in ex.items() if k in input_names}
snake_case_ = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
snake_case_ = {k: v for k, v in ex.items() if k in input_names}
snake_case_ = labelaid[ex[label_name]]
yield (d, label)
snake_case_ = (
tf.data.Dataset.from_generator(
__UpperCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
snake_case_ = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
snake_case_ = (
tf.data.Dataset.from_generator(
__UpperCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
snake_case_ = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
snake_case_ = (
tf.data.Dataset.from_generator(
__UpperCAmelCase, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
snake_case_ = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
a : Tuple = logging.getLogger(__name__)
@dataclass
class a :
snake_case_ = field(metadata={"help": "Which column contains the label"} )
snake_case_ = field(default=_lowerCamelCase , metadata={"help": "The path of the training file"} )
snake_case_ = field(default=_lowerCamelCase , metadata={"help": "The path of the development file"} )
snake_case_ = field(default=_lowerCamelCase , metadata={"help": "The path of the test file"} )
snake_case_ = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
snake_case_ = field(
default=_lowerCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class a :
snake_case_ = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
snake_case_ = field(
default=_lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
snake_case_ = field(
default=_lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
snake_case_ = field(default=_lowerCamelCase , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case_ = field(
default=_lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def __magic_name__ ( ) -> Tuple:
'''simple docstring'''
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
snake_case_ ,snake_case_ ,snake_case_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, )
logger.info(
F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
F"16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
snake_case_ ,snake_case_ ,snake_case_ ,snake_case_ = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=__UpperCAmelCase, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
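# Size the classification head from the label map returned by get_tfds and store both label<->id mappings in the config.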
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(__UpperCAmelCase ), labelaid=__UpperCAmelCase, idalabel={id: label for label, id in labelaid.items()}, finetuning_task='''text-classification''', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
snake_case_ = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool('''.bin''' in model_args.model_name_or_path ), config=__UpperCAmelCase, cache_dir=model_args.cache_dir, )
def compute_metrics(__UpperCAmelCase ) -> Dict:
snake_case_ = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
snake_case_ = TFTrainer(
model=__UpperCAmelCase, args=__UpperCAmelCase, train_dataset=__UpperCAmelCase, eval_dataset=__UpperCAmelCase, compute_metrics=__UpperCAmelCase, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir, '''eval_results.txt''' )
with open(__UpperCAmelCase, '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
results.update(__UpperCAmelCase )
return results
if __name__ == "__main__":
main()
| 56 |
def _UpperCAmelCase (input_str : str , use_pascal : bool = False ):
    if not isinstance(input_str , str ):
        _A : str = f"Expected string as input, found {type(input_str )}"
        raise ValueError(_A )
    if not isinstance(use_pascal , bool ):
        _A : str = f"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(_A )
    words = input_str.split("_" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 11 | 0 |
"""simple docstring"""
A : int = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A : List[Any] = [{"type": "code", "content": INSTALL_CONTENT}]
A : List[str] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 57 |
from __future__ import annotations
def _UpperCAmelCase (value : list[int] , weight : list[int] , capacity : int ):
    index : list[int] = list(range(len(value ) ) )
    ratio : list[float] = [v / w for v, w in zip(value , weight )]
    # sort item indices by value-to-weight ratio, best first
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 58 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase)
| 11 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__lowerCamelCase = logging.getLogger(__name__)
def UpperCamelCase ( ):
snake_case : Tuple = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=__lowerCamelCase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=__lowerCamelCase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=__lowerCamelCase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=__lowerCamelCase , default=1000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=__lowerCamelCase , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=__lowerCamelCase , type=__lowerCamelCase , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=__lowerCamelCase , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=__lowerCamelCase , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
snake_case : Dict = parser.parse_args()
return args
def UpperCamelCase ( __lowerCamelCase : Union[str, Any] ):
def fn(__lowerCamelCase : Union[str, Any] ):
return tokenizer(examples["text"] )
return fn
def UpperCamelCase ( __lowerCamelCase : Optional[int] ):
snake_case : Optional[int] = []
for i in range(len(tokenized_data["input_ids"] ) ):
snake_case : str = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
snake_case : Optional[int] = tf.train.Features(feature=__lowerCamelCase )
snake_case : List[str] = tf.train.Example(features=__lowerCamelCase )
snake_case : Union[str, Any] = example.SerializeToString()
records.append(__lowerCamelCase )
return records
def UpperCamelCase ( __lowerCamelCase : List[Any] ):
snake_case : Any = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
snake_case : Optional[Any] = min(len(__lowerCamelCase ) , args.limit )
snake_case : Tuple = dataset.select(range(__lowerCamelCase ) )
print(f"""Limiting the dataset to {args.limit} entries.""" )
snake_case : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
snake_case : List[Any] = os.path.join(args.output_dir , args.split )
if not os.path.exists(__lowerCamelCase ):
os.makedirs(__lowerCamelCase )
else:
snake_case : Tuple = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
snake_case : Any = tokenize_function(__lowerCamelCase )
snake_case : Optional[Any] = dataset.map(__lowerCamelCase , batched=__lowerCamelCase , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(__lowerCamelCase : List[str] ):
# Concatenate all texts.
snake_case : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
snake_case : str = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
snake_case : Any = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
snake_case : Union[str, Any] = {
k: [t[i : i + args.max_length] for i in range(0 , __lowerCamelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
snake_case : Any = dataset_tokenized.map(__lowerCamelCase , batched=__lowerCamelCase , batch_size=1000 , num_proc=4 )
snake_case : Dict = 0
snake_case : Optional[int] = 0
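# Serialize the grouped samples and write them out as TFRecord shards of at most args.shard_size records each.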
for shard in range(0 , len(__lowerCamelCase ) , args.shard_size ):
snake_case : int = grouped_dataset[shard : shard + args.shard_size]
snake_case : Any = len(dataset_snapshot["input_ids"] )
snake_case : List[Any] = os.path.join(__lowerCamelCase , f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
snake_case : Tuple = get_serialized_examples(__lowerCamelCase )
with tf.io.TFRecordWriter(__lowerCamelCase ) as out_file:
for i in range(len(__lowerCamelCase ) ):
snake_case : Dict = serialized_examples[i]
out_file.write(__lowerCamelCase )
print("Wrote file {} containing {} records".format(__lowerCamelCase , __lowerCamelCase ) )
shard_count += 1
total_records += records_containing
with open(f"""split-{args.split}-records-count.txt""" , "w" ) as f:
print(f"""Total {args.split} records: {total_records}""" , file=__lowerCamelCase )
if __name__ == "__main__":
__lowerCamelCase = parse_args()
main(args)
| 59 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]:
_A : int = parent
_A : Optional[Any] = batch_size
_A : str = image_size
_A : Tuple = patch_size
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : Any = num_heads
_A : Any = window_size
_A : int = mlp_ratio
_A : Any = qkv_bias
_A : Union[str, Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Dict = drop_path_rate
_A : List[Any] = hidden_act
_A : Any = use_absolute_embeddings
_A : Optional[int] = patch_norm
_A : Tuple = layer_norm_eps
_A : List[str] = initializer_range
_A : Optional[int] = is_training
_A : Optional[Any] = scope
_A : Optional[int] = use_labels
_A : Dict = type_sequence_label_size
_A : str = encoder_stride
_A : Optional[int] = out_features
_A : Optional[int] = out_indices
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = MaskFormerSwinModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : int = model(__lowerCamelCase)
_A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : Dict = model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4])
# verify ValueError
with self.parent.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = ["stem"]
_A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = MaskFormerSwinModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self) -> str:
return
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
def _lowerCamelCase ( self) -> Optional[int]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear))
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(__lowerCamelCase)
_A : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Any = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_A : Tuple = outputs.hidden_states
_A : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
# Swin has a different seq_length
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = 3
_A : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
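# Pad height and width up to the next multiple of the patch size so the hidden-state shapes can also be checked for padded inputs.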
_A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__lowerCamelCase):
_A : Optional[int] = 0
return t
def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}):
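# Run the model twice on the prepared inputs and check that its tuple outputs and dict outputs agree element-wise, with NaNs zeroed out before comparison.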
with torch.no_grad():
_A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase)
_A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple()
def recursive_check(__lowerCamelCase , __lowerCamelCase):
if isinstance(__lowerCamelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values()):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has"
F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}."
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase)
for model_class in self.all_model_classes:
_A : List[Any] = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
_A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = MaskFormerSwinModelTester(self)
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
_A : Optional[Any] = backbone_class(__lowerCamelCase)
backbone.to(__lowerCamelCase)
backbone.eval()
_A : List[Any] = backbone(**__lowerCamelCase)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __lowerCamelCase)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
_A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase)
self.assertIsNotNone(outputs.hidden_states)
self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_A , _A , _A : List[str] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
_A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase)
self.assertIsNotNone(outputs.attentions)
| 11 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : List[str] = logging.get_logger(__name__)
snake_case__ : Tuple = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class snake_case_( a__ ):
__UpperCamelCase = '''markuplm'''
def __init__( self : Tuple , UpperCamelCase_ : Optional[int]=3_0_5_2_2 , UpperCamelCase_ : Any=7_6_8 , UpperCamelCase_ : str=1_2 , UpperCamelCase_ : Dict=1_2 , UpperCamelCase_ : str=3_0_7_2 , UpperCamelCase_ : Any="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Optional[int]=5_1_2 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : int=1E-12 , UpperCamelCase_ : Union[str, Any]=0 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Optional[int]=2_5_6 , UpperCamelCase_ : Tuple=1_0_2_4 , UpperCamelCase_ : Any=2_1_6 , UpperCamelCase_ : int=1_0_0_1 , UpperCamelCase_ : Union[str, Any]=3_2 , UpperCamelCase_ : Dict=5_0 , UpperCamelCase_ : Optional[int]="absolute" , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[Any]=None , **UpperCamelCase_ : str , ):
super().__init__(
pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , )
lowerCAmelCase : List[Any] = vocab_size
lowerCAmelCase : Tuple = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : int = num_attention_heads
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Dict = intermediate_size
lowerCAmelCase : List[str] = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : str = type_vocab_size
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : str = position_embedding_type
lowerCAmelCase : str = use_cache
lowerCAmelCase : Tuple = classifier_dropout
# additional properties
lowerCAmelCase : List[str] = max_depth
lowerCAmelCase : Optional[int] = max_xpath_tag_unit_embeddings
lowerCAmelCase : str = max_xpath_subs_unit_embeddings
lowerCAmelCase : List[str] = tag_pad_id
lowerCAmelCase : int = subs_pad_id
lowerCAmelCase : int = xpath_unit_hidden_size
| 60 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_a = False
class A_ (unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCAmelCase_ : List[str] = torch.manual_seed(0 )
UpperCAmelCase_ : int = pipe.dual_guided(
prompt="first prompt" , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
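# Round-trip the pipeline through save_pretrained / from_pretrained and make sure the reloaded pipeline reproduces the same images.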
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase_ )
UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_ : Optional[int] = generator.manual_seed(0 )
UpperCAmelCase_ : List[Any] = pipe.dual_guided(
prompt="first prompt" , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase_ : Union[str, Any] = "cyberpunk 2077"
UpperCAmelCase_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCAmelCase_ : List[Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = pipe.dual_guided(
prompt=lowercase_ , image=lowercase_ , text_to_image_strength=0.75 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
UpperCAmelCase_ : Union[str, Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ : Optional[Any] = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Any = "A painting of a squirrel eating a burger "
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Tuple = pipe.text_to_image(
prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCAmelCase_ : List[str] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ : int = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
UpperCAmelCase_ : Optional[Any] = pipe.image_variation(lowercase_ , generator=lowercase_ , output_type="numpy" ).images
UpperCAmelCase_ : Tuple = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ : Union[str, Any] = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 61 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ):
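# Preprocess: shrink width/height down to multiples of 32, convert to a float NCHW tensor, and rescale pixel values from [0, 255] to [-1, 1].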
_A , _A : Any = image.size
_A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0
_A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 )
_A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ )
return 2.0 * image - 1.0
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase)
@torch.no_grad()
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Tuple = 1
elif isinstance(__lowerCamelCase , torch.Tensor):
_A : Union[str, Any] = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}")
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Union[str, Any] = preprocess(__lowerCamelCase)
_A , _A : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
_A : str = next(self.unet.parameters()).dtype
_A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase)
_A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device)
_A : Any = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_A : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_A : Optional[int] = {}
if accepts_eta:
_A : List[Any] = eta
for t in self.progress_bar(__lowerCamelCase):
# concat latents and low resolution image in the channel dimension.
_A : List[Any] = torch.cat([latents, image] , dim=1)
_A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
# predict the noise residual
_A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample
# compute the previous noisy sample x_t -> x_t-1
_A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample
# decode the image latents with the VQVAE
_A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample
_A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0)
_A : Tuple = image / 2 + 0.5
_A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_A : Optional[int] = self.numpy_to_pil(__lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase)
| 11 | 0 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_A = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = DebertaVaTokenizer
UpperCAmelCase__ : Tuple = DebertaVaTokenizerFast
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Optional[Any] = True
def _a ( self ) -> List[str]:
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCamelCase =DebertaVaTokenizer(A_ , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self , A_ ) -> Dict:
__UpperCamelCase ='this is a test'
__UpperCamelCase ='this is a test'
return input_text, output_text
def _a ( self ) -> Tuple:
__UpperCamelCase ='<pad>'
__UpperCamelCase =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(A_ ) , 30001 )
def _a ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _a ( self ) -> List[Any]:
# fmt: off
__UpperCamelCase =' \tHeLLo!how \n Are yoU? '
__UpperCamelCase =['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , do_lower_case=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , do_lower_case=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def _a ( self ) -> Optional[int]:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def _a ( self ) -> str:
pass
def _a ( self ) -> Dict:
# fmt: off
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , split_by_punct=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , split_by_punct=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Any:
# fmt: off
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Union[str, Any]:
# fmt: off
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
# fmt: off
__UpperCamelCase =' \tHeLLo!how \n Are yoU? '
__UpperCamelCase =['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
__UpperCamelCase =DebertaVaTokenizer(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , do_lower_case=A_ , split_by_punct=A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_rust_tokenizer()
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =tokenizer.convert_ids_to_tokens(tokenizer.encode(A_ , add_special_tokens=A_ ) )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A_ , add_special_tokens=A_ ) )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =self.get_rust_tokenizer()
__UpperCamelCase =tokenizer.encode(A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase ='This is a test'
__UpperCamelCase =[13, 1, 4398, 25, 21, 1289]
__UpperCamelCase =['▁', 'T', 'his', '▁is', '▁a', '▁test']
__UpperCamelCase =['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
__UpperCamelCase =DebertaVaTokenizer(A_ , keep_accents=A_ )
__UpperCamelCase =DebertaVaTokenizerFast(A_ , keep_accents=A_ )
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
# fmt: off
__UpperCamelCase ='I was born in 92000, and this is falsé.'
__UpperCamelCase =[13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
__UpperCamelCase =['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
__UpperCamelCase =['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
__UpperCamelCase =tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.encode(A_ , add_special_tokens=A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =rust_tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =DebertaVaTokenizer(A_ )
__UpperCamelCase =tokenizer.encode('sequence builders' )
__UpperCamelCase =tokenizer.encode('multi-sequence build' )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , A_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , A_ , )
@slow
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase ={'input_ids': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
| 62 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VQModel
__SCREAMING_SNAKE_CASE = "sample"
@property
def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]:
_A : Optional[int] = 4
_A : Tuple = 3
_A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase)
return {"sample": image}
@property
def _lowerCamelCase ( self) -> int:
return (3, 3_2, 3_2)
@property
def _lowerCamelCase ( self) -> List[Any]:
return (3, 3_2, 3_2)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[Any] = {
"block_out_channels": [3_2, 6_4],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_A : int = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> Any:
pass
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(__lowerCamelCase)
_A : str = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(__lowerCamelCase).eval()
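# Seed the CPU (and, if available, CUDA) RNGs so the random input and the expected output slice below stay reproducible.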
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
_A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
_A : Optional[int] = image.to(__lowerCamelCase)
with torch.no_grad():
_A : List[str] = model(__lowerCamelCase).sample
_A : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3])
# fmt: on
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
| 11 | 0 |
'''simple docstring'''
def solution( pence : int = 200 ) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        # start at the coin value so smaller amounts keep their current counts
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
| 63 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
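# Cache the token id of every FAIRSEQ language code so switching source/target languages is a simple dictionary lookup.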
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Any = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
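# A minimal sketch (independent of the class above) of the special-token layout that
# set_src_lang_special_tokens installs: an empty prefix and a suffix of [</s>, lang_code],
# so every encoded source sentence ends with EOS followed by the language code. All ids
# below are invented for illustration.
def _mbart_layout_demo() -> None:
    token_ids = [17, 42, 99]           # hypothetical subword ids for a sentence
    eos_id, lang_code_id = 2, 250004   # hypothetical ids for </s> and en_XX
    prefix_tokens: list = []
    suffix_tokens = [eos_id, lang_code_id]
    model_input = prefix_tokens + token_ids + suffix_tokens
    assert model_input == [17, 42, 99, 2, 250004]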
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
A_ = '''Muhammad Umer Farooq'''
A_ = '''MIT'''
A_ = '''1.0.0'''
A_ = '''Muhammad Umer Farooq'''
A_ = '''[email protected]'''
A_ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Optional[Any], a_: str ):
'''simple docstring'''
super().__init__()
_snake_case : list[str] = []
_snake_case : List[Any] = domain
def UpperCamelCase_ ( self: List[Any], a_: str, a_: list[tuple[str, str | None]] ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_snake_case : Any = parse.urljoin(self.domain, a_ )
self.urls.append(a_ )
def get_domain_name(url: str) -> str:
    """Return the second-level domain of a url, e.g. 'github.com'."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Return the full network location (netloc) of a url."""
    return parse.urlparse(url).netloc


def emails_from_url(snake_case__: str = "https://github.com") -> list[str]:
"""simple docstring"""
_snake_case : Dict = get_domain_name(snake_case__ )
# Initialize the parser
_snake_case : List[str] = Parser(snake_case__ )
try:
# Open URL
_snake_case : Union[str, Any] = requests.get(snake_case__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
_snake_case : str = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_snake_case : Optional[Any] = requests.get(snake_case__ )
# Get the valid email.
_snake_case : Tuple = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(snake_case__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(snake_case__ )
if __name__ == "__main__":
A_ = emails_from_url('''https://github.com''')
print(F'''{len(emails)} emails found:''')
print('''\n'''.join(sorted(emails)))
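# A self-contained sketch of the extraction step used above: pull addresses for one domain
# out of a block of text with a regular expression. The sample text, domain and helper name
# are invented for illustration; the crawler above uses a simpler pattern.
import re as _re


def _extract_emails(text: str, domain: str) -> list[str]:
    # keep only addresses whose host part matches the crawled domain
    return sorted(set(_re.findall(r"[a-zA-Z0-9._%+-]+@" + _re.escape(domain), text)))


assert _extract_emails("write to [email protected] or [email protected]", "example.com") == ["[email protected]"]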
| 64 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
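# For example, get_pairs(("l", "o", "w", "e", "r</w>")) returns
# {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")}.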
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_A : Dict = do_lower_case
with open(__lowerCamelCase , encoding="utf-8") as vocab_handle:
_A : Optional[int] = json.load(__lowerCamelCase)
_A : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_A : Optional[Any] = None
_A : Tuple = None
else:
with open(__lowerCamelCase , encoding="utf-8") as merges_handle:
_A : Optional[int] = merges_handle.read().split("\n")[:-1]
_A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges]
_A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : List[Any] = {}
@property
def _lowerCamelCase ( self) -> int:
return len(self.decoder)
def _lowerCamelCase ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A : int = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[int] = bigram
_A : int = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : str = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : str = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : List[str] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : List[Any] = get_pairs(__lowerCamelCase)
_A : Tuple = " ".join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_A : List[str] = "\n" + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_A : int = word.replace(__lowerCamelCase , "")
_A : int = word.replace(" " , __lowerCamelCase)
_A : Union[str, Any] = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : Optional[int] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return (vocab_file, merges_file)
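# A minimal sketch of the merge-ranking idea used above: lines of merges.txt become a dict
# mapping a symbol pair to its priority, and each BPE step merges the lowest-ranked pair
# present in the word. The merges and pairs below are invented for illustration.
def _bpe_rank_demo() -> None:
    merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]
    bpe_ranks = dict(zip(merges, range(len(merges))))
    pairs = {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r</w>")}
    best = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
    assert best == ("l", "o")  # rank 0 wins; pairs not in bpe_ranks count as infinity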
| 11 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Tuple = 'gpt_neo'
__UpperCAmelCase : Any = ['past_key_values']
__UpperCAmelCase : List[Any] = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__(self : Optional[Any] , __UpperCAmelCase : Any=5_0_2_5_7 , __UpperCAmelCase : List[Any]=2_0_4_8 , __UpperCAmelCase : int=2_0_4_8 , __UpperCAmelCase : Tuple=2_4 , __UpperCAmelCase : List[str]=[[["global", "local"], 1_2]] , __UpperCAmelCase : Union[str, Any]=1_6 , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : List[Any]=2_5_6 , __UpperCAmelCase : List[Any]="gelu_new" , __UpperCAmelCase : Optional[Any]=0.0 , __UpperCAmelCase : Optional[int]=0.0 , __UpperCAmelCase : Any=0.0 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : str=1E-5 , __UpperCAmelCase : List[str]=0.02 , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : Optional[int]=5_0_2_5_6 , __UpperCAmelCase : int=5_0_2_5_6 , **__UpperCAmelCase : str , ) -> int:
"""simple docstring"""
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_layers
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = window_size
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = resid_dropout
UpperCAmelCase__ = embed_dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = classifier_dropout
UpperCAmelCase__ = layer_norm_epsilon
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = bos_token_id
UpperCAmelCase__ = eos_token_id
UpperCAmelCase__ = attention_types
UpperCAmelCase__ = self.expand_attention_types_params(__UpperCAmelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
f"""`config.num_layers = {self.num_layers}`. """
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
@staticmethod
def lowercase_ (__UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def lowerCAmelCase_ ( __A, __A, __A, __A ) -> int:
'''simple docstring'''
import torch
UpperCAmelCase__ = input.size()
UpperCAmelCase__ = len(__A )
UpperCAmelCase__ = shape[dimension]
UpperCAmelCase__ = torch.arange(0, __A, __A )
UpperCAmelCase__ = torch.div(sizedim - size, __A, rounding_mode="floor" ) + 1
UpperCAmelCase__ = torch.arange(__A ) + low_indices[:min_length][:, None]
UpperCAmelCase__ = [slice(__A )] * rank
UpperCAmelCase__ = indices
UpperCAmelCase__ = input[s]
UpperCAmelCase__ = list(range(0, rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__A )
def lowerCAmelCase_ ( __A, __A ) -> Any:
'''simple docstring'''
import torch
UpperCAmelCase__ = torch.arange(1, __A )
UpperCAmelCase__ = torch.remainder(__A, __A )
UpperCAmelCase__ = remainders == 0
UpperCAmelCase__ = candidates[divisor_indices]
UpperCAmelCase__ = torch.max(__A )
return largest_divisor, torch.div(__A, __A, rounding_mode="floor" )
class A ( UpperCAmelCase_ ):
@property
def lowercase_ (self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
UpperCAmelCase__ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction="inputs" )
UpperCAmelCase__ = {0: "batch", 1: "past_sequence + sequence"}
else:
UpperCAmelCase__ = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowercase_ (self : List[Any] ) -> int:
"""simple docstring"""
return self._config.num_heads
def lowercase_ (self : str , __UpperCAmelCase : PreTrainedTokenizer , __UpperCAmelCase : int = -1 , __UpperCAmelCase : int = -1 , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = super(__UpperCAmelCase , self ).generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
UpperCAmelCase__ = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCAmelCase__ = seqlen + 2
UpperCAmelCase__ = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase__ = [
(torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(self.num_layers )
]
UpperCAmelCase__ = common_inputs["attention_mask"]
if self.use_past:
UpperCAmelCase__ = ordered_inputs["attention_mask"].dtype
UpperCAmelCase__ = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def lowercase_ (self : Optional[Any] ) -> int:
"""simple docstring"""
return 1_3
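# A short sketch of how the `attention_types` shorthand above expands into one entry per
# layer (mirroring the static helper defined on the config class): [[["global", "local"], 12]]
# repeats the ["global", "local"] pattern 12 times, giving 24 alternating layer types that
# must match `num_layers`. The helper name here is illustrative only.
def _expand_attention_types(attention_types):
    attentions = []
    for item in attention_types:
        for _ in range(item[1]):
            attentions.extend(item[0])
    return attentions


assert _expand_attention_types([[["global", "local"], 2]]) == ["global", "local", "global", "local"]
assert len(_expand_attention_types([[["global", "local"], 12]])) == 24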
| 65 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "vit_mae"
def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int:
super().__init__(**__lowerCamelCase)
_A : int = hidden_size
_A : List[str] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : Optional[int] = hidden_act
_A : List[Any] = hidden_dropout_prob
_A : List[Any] = attention_probs_dropout_prob
_A : Union[str, Any] = initializer_range
_A : str = layer_norm_eps
_A : Any = image_size
_A : int = patch_size
_A : int = num_channels
_A : Dict = qkv_bias
_A : Tuple = decoder_num_attention_heads
_A : Tuple = decoder_hidden_size
_A : List[str] = decoder_num_hidden_layers
_A : Optional[Any] = decoder_intermediate_size
_A : List[str] = mask_ratio
_A : Union[str, Any] = norm_pix_loss
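# A back-of-the-envelope sketch of what the defaults above imply for masking: a 224x224
# image with 16x16 patches yields (224 // 16) ** 2 = 196 patches, and a mask_ratio of 0.75
# drops roughly 147 of them before the encoder runs. The helper name is illustrative only.
def _masked_patch_count(image_size: int = 224, patch_size: int = 16, mask_ratio: float = 0.75) -> int:
    num_patches = (image_size // patch_size) ** 2
    return int(num_patches * mask_ratio)


assert _masked_patch_count() == 147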
| 11 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Optional[int] = torch.load(_lowercase, map_location="""cpu""" )
if "model" in sd.keys():
snake_case_ :str = torch.load(_lowercase, map_location="""cpu""" )["""model"""]
# pop unnecessary weights
snake_case_ :Tuple = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowercase )
snake_case_ :str = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
snake_case_ :List[Any] = sd.pop(_lowercase )
snake_case_ :Tuple = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
snake_case_ :Any = sd[key]
# We split QKV in separate Q,K,V
snake_case_ :Dict = key.replace(""".qkv_proj.""", """.q_proj.""" )
snake_case_ :Optional[Any] = key.replace(""".qkv_proj.""", """.k_proj.""" )
snake_case_ :Optional[Any] = key.replace(""".qkv_proj.""", """.v_proj.""" )
snake_case_ :Dict = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
snake_case_, snake_case_, snake_case_ :Any = torch.split(_lowercase, depth // 3, dim=0 )
snake_case_ :List[Any] = q
snake_case_ :Union[str, Any] = k
snake_case_ :Optional[int] = v
del sd[key]
return sd
@torch.no_grad()
def A_ ( _lowercase, _lowercase, _lowercase=None ):
'''simple docstring'''
snake_case_ :Optional[int] = load_checkpoint(_lowercase )
if config is not None:
snake_case_ :List[str] = OPTConfig.from_pretrained(_lowercase )
else:
snake_case_ :List[Any] = OPTConfig()
snake_case_ :str = OPTModel(_lowercase ).half().eval()
model.load_state_dict(_lowercase )
# Check results
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
__a = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
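# A small sketch of the QKV-splitting step performed above: a fused projection whose first
# dimension stacks Q, K and V is cut into three equal blocks along dim 0. The sizes below
# are toy values for illustration.
import torch as _torch


def _split_qkv_demo() -> None:
    fused = _torch.zeros(12, 4)          # stands in for a fused .qkv_proj. weight
    depth = fused.shape[0]
    assert depth % 3 == 0
    q, k, v = _torch.split(fused, depth // 3, dim=0)
    assert q.shape == k.shape == v.shape == (4, 4)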
| 66 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
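# A rough sketch of the idea behind the lazy import indirection above, expressed with a
# PEP 562 module-level __getattr__: submodules are only imported when one of their exported
# names is first accessed. This is a simplified stand-in, not the actual _LazyModule code.
import importlib

_lazy_exports = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}


def __getattr__(name):
    for module_name, exported in _lazy_exports.items():
        if name in exported:
            module = importlib.import_module("." + module_name, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")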
| 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase ={
"configuration_mobilebert": [
"MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileBertConfig",
"MobileBertOnnxConfig",
],
"tokenization_mobilebert": ["MobileBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileBertForMaskedLM",
"MobileBertForMultipleChoice",
"MobileBertForNextSentencePrediction",
"MobileBertForPreTraining",
"MobileBertForQuestionAnswering",
"MobileBertForSequenceClassification",
"MobileBertForTokenClassification",
"MobileBertLayer",
"MobileBertModel",
"MobileBertPreTrainedModel",
"load_tf_weights_in_mobilebert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileBertForMaskedLM",
"TFMobileBertForMultipleChoice",
"TFMobileBertForNextSentencePrediction",
"TFMobileBertForPreTraining",
"TFMobileBertForQuestionAnswering",
"TFMobileBertForSequenceClassification",
"TFMobileBertForTokenClassification",
"TFMobileBertMainLayer",
"TFMobileBertModel",
"TFMobileBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
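# To make the variation expansion above concrete, here is an interpreter-style sketch
# (illustrative only) of how two dimensions multiply into the six runs listed earlier:
#
#     >>> import itertools
#     >>> dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#     >>> [" ".join(v).strip() for v in itertools.product(*dims)]
#     ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#      '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
#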
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase__ = float('nan')
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase) -> Optional[Any]:
_A : List[Any] = sys.stdout
_A : str = open(__lowerCamelCase , "a")
def __getattr__( self , __lowerCamelCase) -> List[str]:
return getattr(self.stdout , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
self.stdout.write(__lowerCamelCase)
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M))
def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ):
_A : Tuple = []
# deal with critical env vars
_A : Dict = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
_A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ )
if val is not None:
cmd.append(f"{key}={val}" )
# python executable (not always needed if the script is executable)
_A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(UpperCamelCase__ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_A : Tuple = []
_A : Dict = ""
while len(UpperCamelCase__ ) > 0:
current_line += f"{cmd.pop(0 )} "
if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(UpperCamelCase__ )
_A : Union[str, Any] = ""
return "\\\n".join(UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ):
# unwrap multi-line input
_A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
_A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += f" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
_A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
_A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
_A : Tuple = variation.replace(" " , "-" )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f:
f.write(result.stdout )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f:
_A : List[str] = json.load(UpperCamelCase__ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ):
_A : Union[str, Any] = []
_A : Optional[int] = []
_A : Any = f"{id}: {variation:<{longest_variation_len}}"
_A : Dict = f"{preamble}: "
_A : Union[str, Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ):
_A : Optional[Any] = process_run_single(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : Optional[Any] = single_run_metrics[target_metric_key]
if not math.isnan(UpperCamelCase__ ):
metrics.append(UpperCamelCase__ )
results.append(UpperCamelCase__ )
outcome += "✓"
else:
outcome += "✘"
_A : str = f"\33[2K\r{outcome}"
if len(UpperCamelCase__ ) > 0:
_A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_A : Any = round(mean_metrics[target_metric_key] , 2 )
_A : Tuple = f"{outcome} {mean_target}"
if len(UpperCamelCase__ ) > 1:
results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}"
print(UpperCamelCase__ )
_A : Optional[int] = variation
return mean_metrics
else:
print(UpperCamelCase__ )
return {variation_key: variation, target_metric_key: nan}
def _UpperCAmelCase ():
_A : int = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
_A : Any = pd.DataFrame(UpperCamelCase__ )
_A : List[str] = "variation"
_A : List[Any] = "diff_%"
_A : int = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(UpperCamelCase__ ):
# as a fallback, use the minimal value as the sentinel
_A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(UpperCamelCase__ ):
_A : Optional[Any] = df.apply(
lambda UpperCamelCase__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
_A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols
# capitalize
_A : Tuple = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
_A : List[str] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "<br>" ) , axis="columns" )
_A : Union[str, Any] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "\n" ) , axis="columns" )
_A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
print("\n\n".join(UpperCamelCase__ ) )
def _UpperCAmelCase ():
_A : int = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , )
parser.add_argument(
"--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
_A : int = parser.parse_args()
_A : Union[str, Any] = args.output_dir
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
_A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ )
# split each dimension into its --foo variations
_A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) )
_A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations )
# split wanted keys
_A : str = args.report_metric_keys.split()
# capture prints into a log file for convenience
_A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
print(f"and this script's output is also piped into {report_fn}" )
_A : Tuple = Tee(UpperCamelCase__ )
print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" )
print(f"Base command: {' '.join(UpperCamelCase__ )}" )
_A : str = "variation"
_A : Union[str, Any] = []
for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ):
_A : Dict = base_cmd + variation.split()
results.append(
process_run(
id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) )
process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 11 | 0 |
import warnings
from functools import wraps
from typing import Callable
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Callable ) -> Callable:
'''simple docstring'''
@wraps(SCREAMING_SNAKE_CASE_ )
def _inner_fn(*SCREAMING_SNAKE_CASE_: int , **SCREAMING_SNAKE_CASE_: Union[str, Any] ):
warnings.warn(
(F'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , SCREAMING_SNAKE_CASE_ , )
return fn(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return _inner_fn
| 68 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__SCREAMING_SNAKE_CASE = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Overwrite the cached training and evaluation sets"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "The number of processes to use for the preprocessing."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowerCamelCase ( self) -> int:
if self.train_file is not None:
_A : Optional[int] = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_A : Dict = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def __call__( self , __lowerCamelCase) -> str:
_A : List[Any] = "label" if "label" in features[0].keys() else "labels"
_A : Any = [feature.pop(__lowerCamelCase) for feature in features]
_A : Optional[int] = len(__lowerCamelCase)
_A : int = len(features[0]["input_ids"])
_A : Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase)] for feature in features
]
_A : str = list(chain(*__lowerCamelCase))
_A : Tuple = self.tokenizer.pad(
__lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
_A : Optional[int] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1) for k, v in batch.items()}
# Add back labels
_A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.intaa)
return batch
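# A shape-level sketch of what the collator above does for multiple choice: every example
# carries num_choices candidate encodings, they are flattened so the tokenizer can pad them
# as a single batch, and the padded tensors are reshaped back to (batch, num_choices, seq_len).
# The numbers below are toy values for illustration.
def _flatten_unflatten_demo() -> None:
    import torch
    batch_size, num_choices, seq_len = 2, 4, 5
    flat = torch.zeros(batch_size * num_choices, seq_len)   # what the tokenizer pads
    unflat = flat.view(batch_size, num_choices, -1)         # what the model consumes
    assert unflat.shape == (batch_size, num_choices, seq_len)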
def _UpperCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A : int = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_A : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_A : List[str] = {}
if data_args.train_file is not None:
_A : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
_A : Tuple = data_args.validation_file
_A : Union[str, Any] = data_args.train_file.split("." )[-1]
_A : List[str] = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_A : Union[str, Any] = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_A : str = [f"ending{i}" for i in range(4 )]
_A : Union[str, Any] = "sent1"
_A : str = "sent2"
if data_args.max_seq_length is None:
_A : Any = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
_A : Optional[Any] = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
_A : int = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCamelCase__ : List[Any] ):
_A : List[Any] = [[context] * 4 for context in examples[context_name]]
_A : Any = examples[question_header_name]
_A : Union[str, Any] = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
]
# Flatten out
_A : Dict = list(chain(*UpperCamelCase__ ) )
_A : List[Any] = list(chain(*UpperCamelCase__ ) )
# Tokenize
_A : str = tokenizer(
UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_A : Optional[int] = raw_datasets["train"]
if data_args.max_train_samples is not None:
_A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
_A : Any = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_A : Optional[int] = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_A : Optional[int] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
_A : Dict = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_A : List[str] = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_A : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(UpperCamelCase__ : Tuple ):
_A , _A : List[str] = eval_predictions
_A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_A : List[str] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
_A : Any = None
if training_args.resume_from_checkpoint is not None:
_A : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A : int = last_checkpoint
_A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_A : Optional[int] = train_result.metrics
_A : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
_A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("train" , UpperCamelCase__ )
trainer.save_metrics("train" , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_A : List[Any] = trainer.evaluate()
_A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
_A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("eval" , UpperCamelCase__ )
trainer.save_metrics("eval" , UpperCamelCase__ )
_A : Tuple = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
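# Illustrative launch command for the multiple-choice script above (model name, output
# directory and flags are assumptions for illustration, not values taken from this file):
#   python run_swag.py --model_name_or_path bert-base-uncased \
#       --do_train --do_eval --output_dir /tmp/swag_output --pad_to_max_length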
| 11 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__UpperCamelCase = '''pt'''
elif is_tf_available():
__UpperCamelCase = '''tf'''
else:
__UpperCamelCase = '''jax'''
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = PerceiverTokenizer
SCREAMING_SNAKE_CASE_ = False
def a_ ( self) -> Union[str, Any]:
super().setUp()
snake_case_ = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def a_ ( self) -> Union[str, Any]:
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')
def a_ ( self, **lowerCAmelCase__) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=False, lowerCAmelCase__=20, lowerCAmelCase__=5) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
snake_case_ = []
for i in range(len(lowerCAmelCase__)):
try:
snake_case_ = tokenizer.decode([i], clean_up_tokenization_spaces=lowerCAmelCase__)
except UnicodeDecodeError:
pass
toks.append((i, tok))
snake_case_ = list(filter(lambda lowerCAmelCase__: re.match(R'^[ a-zA-Z]+$', t[1]), lowerCAmelCase__))
snake_case_ = list(filter(lambda lowerCAmelCase__: [t[0]] == tokenizer.encode(t[1], add_special_tokens=lowerCAmelCase__), lowerCAmelCase__))
if max_length is not None and len(lowerCAmelCase__) > max_length:
snake_case_ = toks[:max_length]
if min_length is not None and len(lowerCAmelCase__) < min_length and len(lowerCAmelCase__) > 0:
while len(lowerCAmelCase__) < min_length:
snake_case_ = toks + toks
# toks_str = [t[1] for t in toks]
snake_case_ = [t[0] for t in toks]
# Ensure consistency
snake_case_ = tokenizer.decode(lowerCAmelCase__, clean_up_tokenization_spaces=lowerCAmelCase__)
if " " not in output_txt and len(lowerCAmelCase__) > 1:
snake_case_ = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=lowerCAmelCase__)
+ ' '
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=lowerCAmelCase__)
)
if with_prefix_space:
snake_case_ = ' ' + output_txt
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
return output_txt, output_ids
def a_ ( self) -> Tuple:
snake_case_ = self.perceiver_tokenizer
snake_case_ = 'Unicode €.'
snake_case_ = tokenizer(lowerCAmelCase__)
snake_case_ = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['input_ids'], lowerCAmelCase__)
# decoding
snake_case_ = tokenizer.decode(lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__, '[CLS]Unicode €.[SEP]')
snake_case_ = tokenizer('e è é ê ë')
snake_case_ = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['input_ids'], lowerCAmelCase__)
# decoding
snake_case_ = tokenizer.decode(lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__, '[CLS]e è é ê ë[SEP]')
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')
def a_ ( self) -> List[str]:
snake_case_ = self.perceiver_tokenizer
snake_case_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
snake_case_ = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
snake_case_ = tokenizer(lowerCAmelCase__, padding=lowerCAmelCase__, return_tensors=lowerCAmelCase__)
self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__)
if FRAMEWORK != "jax":
snake_case_ = list(batch.input_ids.numpy()[0])
else:
snake_case_ = list(batch.input_ids.tolist()[0])
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
self.assertEqual((2, 38), batch.input_ids.shape)
self.assertEqual((2, 38), batch.attention_mask.shape)
def a_ ( self) -> Optional[int]:
snake_case_ = self.perceiver_tokenizer
snake_case_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
snake_case_ = tokenizer(lowerCAmelCase__, padding=lowerCAmelCase__, return_tensors=lowerCAmelCase__)
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids', lowerCAmelCase__)
self.assertIn('attention_mask', lowerCAmelCase__)
self.assertNotIn('decoder_input_ids', lowerCAmelCase__)
self.assertNotIn('decoder_attention_mask', lowerCAmelCase__)
def a_ ( self) -> List[str]:
snake_case_ = self.perceiver_tokenizer
snake_case_ = [
'Summary of the text.',
'Another summary.',
]
snake_case_ = tokenizer(
text_target=lowerCAmelCase__, max_length=32, padding='max_length', truncation=lowerCAmelCase__, return_tensors=lowerCAmelCase__)
self.assertEqual(32, targets['input_ids'].shape[1])
def a_ ( self) -> Optional[int]:
# safety check on max_len default value so we are sure the test works
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
snake_case_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ = tempfile.mkdtemp()
snake_case_ = ' He is very happy, UNwant\u00E9d,running'
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
tokenizer.save_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__)
snake_case_ = after_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
shutil.rmtree(lowerCAmelCase__)
snake_case_ = self.get_tokenizers(model_max_length=42)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
# Isolate this from the other tests because we save additional tokens/etc
snake_case_ = tempfile.mkdtemp()
snake_case_ = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'])
snake_case_ = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token')
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
tokenizer.save_pretrained(lowerCAmelCase__)
snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__)
snake_case_ = after_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
self.assertEqual(after_tokenizer.model_max_length, 42)
snake_case_ = tokenizer.__class__.from_pretrained(lowerCAmelCase__, model_max_length=43)
self.assertEqual(tokenizer.model_max_length, 43)
shutil.rmtree(lowerCAmelCase__)
def a_ ( self) -> Any:
snake_case_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__, 'special_tokens_map.json'), encoding='utf-8') as json_file:
snake_case_ = json.load(lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__, 'tokenizer_config.json'), encoding='utf-8') as json_file:
snake_case_ = json.load(lowerCAmelCase__)
snake_case_ = [f'<extra_id_{i}>' for i in range(125)]
snake_case_ = added_tokens_extra_ids + [
'an_additional_special_token'
]
snake_case_ = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(lowerCAmelCase__, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
json.dump(lowerCAmelCase__, lowerCAmelCase__)
with open(os.path.join(lowerCAmelCase__, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
json.dump(lowerCAmelCase__, lowerCAmelCase__)
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
snake_case_ = tokenizer_class.from_pretrained(
lowerCAmelCase__, )
self.assertIn(
'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
self.assertEqual(
['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])), )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
snake_case_ = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=lowerCAmelCase__)]
snake_case_ = tokenizer_class.from_pretrained(
lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, )
self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
self.assertEqual(
['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])), )
def a_ ( self) -> Optional[Any]:
snake_case_ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178]), '�')
def a_ ( self) -> Optional[Any]:
pass
def a_ ( self) -> List[str]:
pass
def a_ ( self) -> Union[str, Any]:
pass
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> Optional[Any]:
# The default common tokenizer tests use invalid tokens for Perceiver, which can only accept one-character
# strings and special added tokens as tokens
snake_case_ = self.get_tokenizers(fast=lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}'):
snake_case_ = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
snake_case_ = tokenizer.convert_tokens_to_string(lowerCAmelCase__)
self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__)
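# Usage sketch for the byte-level Perceiver tokenizer exercised by these tests (the
# checkpoint name and expected strings come from the tests above; left as comments
# because instantiating it needs Hub access):
#   tok = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
#   ids = tok("Unicode €.")["input_ids"]   # UTF-8 bytes shifted past the special tokens
#   tok.decode(ids)                        # "[CLS]Unicode €.[SEP]"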
| 69 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self) -> str:
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , )
assert hasattr(self , "env")
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
_A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
_A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , )
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)])
def _lowerCamelCase ( self , __lowerCamelCase) -> Any:
# create estimator
_A : Union[str, Any] = self.create_estimator(__lowerCamelCase)
# run training
estimator.fit()
# result dataframe
_A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
_A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_A : Optional[Any] = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump test results into a json file to share in the PR
with open(F"{estimator.latest_training_job.name}.json" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
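# These tests only run when TEST_SAGEMAKER=True is set (see the skipif marker above);
# a typical invocation would look roughly like the following (command is an assumption,
# not taken from this file):
#   TEST_SAGEMAKER=True python -m pytest tests/sagemaker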
| 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A__ : int ={
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[str] =['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] =[
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Optional[Any] =[
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
A__ : Dict =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
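# With the lazy module above, a submodule is only imported when one of its attributes is
# first accessed, e.g. (sketch, assuming the usual transformers package layout):
#   from transformers import FunnelConfig   # resolves configuration_funnel on first access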
| 70 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
__SCREAMING_SNAKE_CASE = "OwlViTImageProcessor"
__SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]:
_A : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
_A : List[Any] = kwargs.pop("feature_extractor")
_A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(__lowerCamelCase , __lowerCamelCase)
def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none.")
if text is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)):
_A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)]
elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase):
_A : Optional[Any] = []
# Maximum number of queries across batch
_A : str = max([len(__lowerCamelCase) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCamelCase) != max_num_queries:
_A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase))
_A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
encodings.append(__lowerCamelCase)
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
if return_tensors == "np":
_A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
_A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0)
_A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0)
else:
raise ValueError("Target return tensor type could not be returned")
_A : Optional[Any] = BatchEncoding()
_A : Tuple = input_ids
_A : Dict = attention_mask
if query_images is not None:
_A : Optional[Any] = BatchEncoding()
_A : List[str] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values
_A : Union[str, Any] = query_pixel_values
if images is not None:
_A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
if text is not None and images is not None:
_A : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_A : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str:
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]:
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@property
def _lowerCamelCase ( self) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
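# Usage sketch for the processor defined above (checkpoint name and the `image` variable
# are illustrative assumptions):
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   # -> BatchEncoding with input_ids, attention_mask and pixel_values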
| 11 | 0 |
import os
from distutils.util import strtobool
def A ( a_ ,a_ ) -> Dict:
for e in env_keys:
__UpperCamelCase : Union[str, Any] =int(os.environ.get(a_ ,-1 ) )
if val >= 0:
return val
return default
def A ( a_ ,a_=False ) -> Tuple:
__UpperCamelCase : Tuple =os.environ.get(a_ ,str(a_ ) )
return strtobool(a_ ) == 1 # As its name indicates `strtobool` actually returns an int...
def A ( a_ ,a_="no" ) -> int:
__UpperCamelCase : Any =os.environ.get(a_ ,str(a_ ) )
return value
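# Note: the three helpers above are all bound to the same name `A`, so only the last
# definition survives at import time. A de-obfuscated sketch of what each is meant to do
# (the descriptive names below are assumptions for illustration):
#   num_procs = get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)     # first non-negative int env var
#   debug     = parse_flag_from_env("DEBUG_MODE", default=False)        # boolean flag via strtobool
#   precision = parse_choice_from_env("MIXED_PRECISION", default="no")  # raw string with a default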
| 71 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
_A : Optional[int] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase)
def _lowerCamelCase ( self) -> int:
_A : Optional[int] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Dict:
_A : int = "sgugger/tiny-distilbert-classification"
_A : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = "sshleifer/tiny-gpt2"
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def _lowerCamelCase ( self) -> int:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Any = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Any:
_A : Union[str, Any] = "sshleifer/tiny-gpt2"
_A : Any = AutoConfig.from_pretrained(__lowerCamelCase)
# set architectures equal to `None`
_A : Dict = None
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : List[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> str:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : Tuple = "sshleifer/tinier_bart"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> str:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> int:
_A : int = "sshleifer/tinier_bart"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Dict:
_A : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> int:
_A : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = PyTorchBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
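# Minimal benchmarking sketch mirroring the tests above (model name is taken from the
# tests; left as comments because it downloads a checkpoint and runs inference):
#   args = PyTorchBenchmarkArguments(models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#                                    sequence_lengths=[8], batch_sizes=[1], multi_process=False)
#   results = PyTorchBenchmark(args).run()   # exposes time_inference_result / memory_inference_result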
| 11 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : Any = KandinskyVaaPipeline
snake_case__ : List[str] = [
"image_embeds",
"negative_image_embeds",
]
snake_case__ : Optional[int] = ["image_embeds", "negative_image_embeds"]
snake_case__ : Any = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case__ : Union[str, Any] = False
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return 1_0_0
@property
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Any = {
'''in_channels''': 4,
# Out channels are double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_lowerCamelCase : Tuple = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCamelCase : Any = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = self.dummy_unet
_lowerCamelCase : Optional[Any] = self.dummy_movq
_lowerCamelCase : Optional[int] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__lowerCAmelCase , )
_lowerCamelCase : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=0 ):
"""simple docstring"""
_lowerCamelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__lowerCAmelCase )
if str(__lowerCAmelCase ).startswith('''mps''' ):
_lowerCamelCase : Union[str, Any] = torch.manual_seed(__lowerCAmelCase )
else:
_lowerCamelCase : Any = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_lowerCamelCase : List[str] = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = '''cpu'''
_lowerCamelCase : Any = self.get_dummy_components()
_lowerCamelCase : Union[str, Any] = self.pipeline_class(**__lowerCAmelCase )
_lowerCamelCase : Tuple = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : Optional[int] = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
_lowerCamelCase : List[Any] = output.images
_lowerCamelCase : str = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
_lowerCamelCase : Any = image[0, -3:, -3:, -1]
_lowerCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_lowerCamelCase : int = np.array(
[0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
_lowerCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
_lowerCamelCase : Any = KandinskyVaaPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
_lowerCamelCase : int = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
_lowerCamelCase : int = '''red cat, 4k photo'''
_lowerCamelCase : Any = torch.Generator(device='''cuda''' ).manual_seed(0 )
_lowerCamelCase , _lowerCamelCase : int = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_lowerCamelCase : List[Any] = torch.Generator(device='''cuda''' ).manual_seed(0 )
_lowerCamelCase : Dict = pipeline(
image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=1_0_0 , output_type='''np''' , )
_lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
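# Two-stage usage sketch matching the slow test above: the prior pipeline turns a text
# prompt into image embeddings and the decoder pipeline renders them (left as comments;
# it needs a GPU and downloads both checkpoints):
#   prior = KandinskyVaaPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   decoder = KandinskyVaaPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
#   emb, neg_emb = prior("red cat, 4k photo").to_tuple()
#   image = decoder(image_embeds=emb, negative_image_embeds=neg_emb, output_type="np").images[0]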
| 72 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = NllbTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple:
# Mask token behaves like a normal word, i.e. it includes the space before it
_A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
_A : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , )
_A : int = vocab_file
_A : Optional[Any] = False if not self.vocab_file else True
_A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "eng_Latn"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : Tuple = [self.sep_token_id]
_A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : List[Any] = src_lang
_A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Tuple = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , __lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Tuple = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> str:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : List[str] = []
_A : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_A : Tuple = [self.cur_lang_code]
_A : Optional[Any] = [self.eos_token_id]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : Tuple = []
_A : Any = [self.eos_token_id, self.cur_lang_code]
else:
_A : Union[str, Any] = [self.cur_lang_code]
_A : str = [self.eos_token_id]
_A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : Dict = self.convert_ids_to_tokens(self.suffix_tokens)
_A : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
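# Translation usage sketch for the fast NLLB tokenizer above (the class here is the
# obfuscated counterpart of NllbTokenizerFast; checkpoint and language codes are taken
# from the constants in this file):
#   tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M",
#                                           src_lang="eng_Latn", tgt_lang="fra_Latn")
#   batch = tok("Hello world", return_tensors="pt")   # source text is wrapped with the eng_Latn code and </s>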
| 11 | 0 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[str]=1_3 ,SCREAMING_SNAKE_CASE__ : List[str]=3_2 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : str=3 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_6 ,SCREAMING_SNAKE_CASE__ : List[str]=[1, 2, 1] ,SCREAMING_SNAKE_CASE__ : Tuple=[2, 2, 4] ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : List[Any]=2.0 ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : Tuple=0.0 ,SCREAMING_SNAKE_CASE__ : Dict=0.0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : str=False ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=0.02 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1E-5 ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]=1_0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=8 ,):
__lowerCamelCase : Optional[int] = parent
__lowerCamelCase : Union[str, Any] = batch_size
__lowerCamelCase : List[Any] = image_size
__lowerCamelCase : Optional[Any] = patch_size
__lowerCamelCase : int = num_channels
__lowerCamelCase : Dict = embed_dim
__lowerCamelCase : List[Any] = depths
__lowerCamelCase : Optional[Any] = num_heads
__lowerCamelCase : Tuple = window_size
__lowerCamelCase : Tuple = mlp_ratio
__lowerCamelCase : Dict = qkv_bias
__lowerCamelCase : int = hidden_dropout_prob
__lowerCamelCase : List[Any] = attention_probs_dropout_prob
__lowerCamelCase : Tuple = drop_path_rate
__lowerCamelCase : str = hidden_act
__lowerCamelCase : Optional[Any] = use_absolute_embeddings
__lowerCamelCase : str = patch_norm
__lowerCamelCase : Optional[int] = layer_norm_eps
__lowerCamelCase : Optional[Any] = initializer_range
__lowerCamelCase : int = is_training
__lowerCamelCase : str = scope
__lowerCamelCase : int = use_labels
__lowerCamelCase : int = type_sequence_label_size
__lowerCamelCase : int = encoder_stride
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowerCamelCase : int = None
if self.use_labels:
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
__lowerCamelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : int):
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : Tuple = SwinvaModel(config=SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
__lowerCamelCase : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim))
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
__lowerCamelCase : List[str] = SwinvaForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
__lowerCamelCase : Tuple = 1
__lowerCamelCase : Union[str, Any] = SwinvaForMaskedImageModeling(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size))
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : str):
__lowerCamelCase : Optional[Any] = self.type_sequence_label_size
__lowerCamelCase : Optional[int] = SwinvaForImageClassification(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
__lowerCamelCase : int = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Any = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Union[str, Any] = config_and_inputs
__lowerCamelCase : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Optional[Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_UpperCAmelCase : Tuple = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase : Dict = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Any = False
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : List[Any] = SwinvaModelTester(self)
__lowerCamelCase : Optional[int] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,embed_dim=3_7)
def lowerCAmelCase ( self : int):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__)
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
def lowerCAmelCase ( self : Tuple):
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds')
def lowerCAmelCase ( self : Optional[Any]):
pass
def lowerCAmelCase ( self : Any):
__lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module))
__lowerCamelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ ,nn.Linear))
def lowerCAmelCase ( self : str):
__lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
__lowerCamelCase : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Tuple = True
for model_class in self.all_model_classes:
__lowerCamelCase : str = True
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[str] = True
__lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
with torch.no_grad():
__lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Optional[Any] = outputs.attentions
__lowerCamelCase : Tuple = len(self.model_tester.depths)
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCamelCase : Dict = True
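# attention is computed per local window, so each attention map spans window_size**2 query and key positions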
__lowerCamelCase : int = config.window_size**2
__lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
with torch.no_grad():
__lowerCamelCase : str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Optional[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
self.assertListEqual(
list(attentions[0].shape[-3:]) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
__lowerCamelCase : Dict = len(SCREAMING_SNAKE_CASE__)
# Check attention is always last and order is fine
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : List[Any] = True
__lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
with torch.no_grad():
__lowerCamelCase : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
if hasattr(self.model_tester ,'num_hidden_states_types'):
__lowerCamelCase : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__lowerCamelCase : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Optional[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Any):
__lowerCamelCase : int = model_class(SCREAMING_SNAKE_CASE__)
model.to(SCREAMING_SNAKE_CASE__)
model.eval()
with torch.no_grad():
__lowerCamelCase : Dict = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Dict = outputs.hidden_states
__lowerCamelCase : List[str] = getattr(
self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths) + 1)
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
# Swinv2 has a different seq_length
__lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
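# the first hidden state holds one token per non-overlapping patch of the input image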
__lowerCamelCase : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) ,[num_patches, self.model_tester.embed_dim] ,)
__lowerCamelCase : int = outputs.reshaped_hidden_states
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple = reshaped_hidden_states[0].shape
__lowerCamelCase : Union[str, Any] = (
reshaped_hidden_states[0].view(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,height * width).permute(0 ,2 ,1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) ,[num_patches, self.model_tester.embed_dim] ,)
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase , __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCamelCase : List[Any] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Optional[int] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : int = 3
__lowerCamelCase : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCamelCase : Tuple = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
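# pad height and width up so that both become divisible by the patch size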
__lowerCamelCase : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCamelCase : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCamelCase : str = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,(padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : List[Any] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,(padded_height, padded_width))
def lowerCAmelCase ( self : int):
__lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : Optional[int]):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Optional[int] = SwinvaModel.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : str):
__lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : int = _config_zero_init(SCREAMING_SNAKE_CASE__)
for model_class in self.all_model_classes:
__lowerCamelCase : List[Any] = model_class(config=SCREAMING_SNAKE_CASE__)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"Parameter {name} of model {model_class} seems not properly initialized" ,)
@require_vision
@require_torch
class A_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self : str):
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self : str):
__lowerCamelCase : List[str] = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = self.default_image_processor
__lowerCamelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
__lowerCamelCase : List[str] = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='pt').to(SCREAMING_SNAKE_CASE__)
# forward pass
with torch.no_grad():
__lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE__)
# verify the logits
__lowerCamelCase : Tuple = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = torch.tensor([-0.3947, -0.4306, 0.0026]).to(SCREAMING_SNAKE_CASE__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 73 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
def _UpperCAmelCase (UpperCamelCase__ : type , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None , ):
_A : Union[str, Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
_A : Dict = formatter_cls
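# every alias (including the canonical name itself) is mapped back to the canonical format type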
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
_A : Dict = format_type
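# formats whose optional backend is not installed are registered with the error to raise when they are requested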
def _UpperCAmelCase (UpperCamelCase__ : Exception , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None ):
_A : Union[str, Any] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
_A : Union[str, Any] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
lowerCAmelCase__ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
lowerCAmelCase__ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
lowerCAmelCase__ = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def _UpperCAmelCase (UpperCamelCase__ : Optional[str] ):
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def _UpperCAmelCase (UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[Any] ):
_A : List[str] = get_format_type_from_alias(UpperCamelCase__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCamelCase__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 11 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = '''▁'''
_lowercase = {'''vocab_file''': '''spiece.model'''}
_lowercase = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
_lowercase = {
'''google/reformer-crime-and-punishment''': 52_42_88,
}
class lowerCAmelCase_ ( PreTrainedTokenizer ):
'''simple docstring'''
_lowerCamelCase: Any = VOCAB_FILES_NAMES
_lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Any = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple ,A_ : Optional[Any] ,A_ : Optional[Any]="</s>" ,A_ : Union[str, Any]="<unk>" ,A_ : Dict=[] ,A_ : Optional[Dict[str, Any]] = None ,**A_ : str ,) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A_ ,unk_token=A_ ,additional_special_tokens=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict[str, int]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Tuple:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : int ,A_ : str ) -> str:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Optional[Any] ) -> Optional[Any]:
return self.sp_model.piece_to_id(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ) -> Dict:
if index < self.sp_model.get_piece_size():
A = self.sp_model.IdToPiece(A_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ) -> Dict:
current_sub_tokens = []
out_string = ''
for token in A_:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(current_sub_tokens ) + token
current_sub_tokens = []
else:
current_sub_tokens.append(token )
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,) | 74 |
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
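# h is the width of each sub-interval of the integration range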
_A : int = (boundary[1] - boundary[0]) / steps
_A : Any = boundary[0]
_A : List[Any] = boundary[1]
_A : str = make_points(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : str = 0.0
y += (h / 2.0) * f(UpperCamelCase__ )
for i in x_i:
# print(i)
y += h * f(UpperCamelCase__ )
y += (h / 2.0) * f(UpperCamelCase__ )
return y
def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
_A : Optional[int] = a + h
while x < (b - h):
yield x
_A : Dict = x + h
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # enter your function here
_A : Any = (x - 0) * (x - 0)
return y
def _UpperCAmelCase ():
_A : Optional[Any] = 0.0 # Lower bound of integration
_A : Optional[int] = 1.0 # Upper bound of integration
_A : List[Any] = 10.0 # define number of steps or resolution
_A : Any = [a, b] # define boundary of integration
_A : Tuple = method_a(UpperCamelCase__ , UpperCamelCase__ )
print(f"y = {y}" )
if __name__ == "__main__":
main()
| 11 | 0 |
'''simple docstring'''
def a_ ( __snake_case : int , __snake_case : int ) -> int:
"""simple docstring"""
while b:
lowerCamelCase_, lowerCamelCase_ =b, a % b
return a
def a_ ( __snake_case : int , __snake_case : int ) -> int:
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(__snake_case , a % b )
def a_ ( ) -> str:
"""simple docstring"""
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 75 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ("foo.json",)])
def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]:
_A : str = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
_A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0)
self.assertEqual(loaded_config.max_length , 2_0)
self.assertEqual(loaded_config.max_time , __lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Optional[int] = AutoConfig.from_pretrained("gpt2")
_A : int = GenerationConfig.from_model_config(__lowerCamelCase)
_A : List[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Optional[Any] = GenerationConfig()
_A : List[Any] = {
"max_new_tokens": 1_0_2_4,
"foo": "bar",
}
_A : List[str] = copy.deepcopy(__lowerCamelCase)
_A : int = generation_config.update(**__lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"foo": "bar"})
def _lowerCamelCase ( self) -> Any:
_A : int = GenerationConfig()
_A : int = "bar"
with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase)
_A : Any = GenerationConfig.from_pretrained(__lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar")
_A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase)
assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config
def _lowerCamelCase ( self) -> List[str]:
_A : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , __lowerCamelCase)
self.assertEqual(default_config.num_beams , 1)
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , __lowerCamelCase)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase)
_A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls) -> Optional[int]:
_A : Dict = TOKEN
HfFolder.save_token(__lowerCamelCase)
@classmethod
def _lowerCamelCase ( cls) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org")
except HTTPError:
pass
def _lowerCamelCase ( self) -> Any:
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token)
_A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Union[str, Any] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token)
_A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
| 11 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : int = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
SCREAMING_SNAKE_CASE : Optional[int] = DetaConfig(
backbone_config=_a , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=_a , with_box_refine=_a , two_stage=_a , )
# set labels
SCREAMING_SNAKE_CASE : Tuple = "huggingface/label-files"
if "o365" in model_name:
SCREAMING_SNAKE_CASE : Optional[int] = 366
SCREAMING_SNAKE_CASE : Optional[Any] = "object365-id2label.json"
else:
SCREAMING_SNAKE_CASE : str = 91
SCREAMING_SNAKE_CASE : Tuple = "coco-detection-id2label.json"
SCREAMING_SNAKE_CASE : int = num_labels
SCREAMING_SNAKE_CASE : str = json.load(open(cached_download(hf_hub_url(_a , _a , repo_type="dataset")) , "r"))
SCREAMING_SNAKE_CASE : Union[str, Any] = {int(_a): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : List[str] = idalabel
SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase__ ( _a):
SCREAMING_SNAKE_CASE : List[Any] = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight"))
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias"))
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight"))
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias"))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
if i < 3:
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight"))
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias"))
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight"))
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias"))
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight"))
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias"))
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight"))
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias"))
# transformer encoder
for i in range(config.encoder_layers):
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias"))
# transformer decoder
for i in range(config.decoder_layers):
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias"))
# fmt: on
return rename_keys
def lowerCamelCase__ ( _a , _a , _a):
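# remove the tensor stored under the old key and re-insert it under the new key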
SCREAMING_SNAKE_CASE : Tuple = dct.pop(_a)
SCREAMING_SNAKE_CASE : Optional[Any] = val
def lowerCamelCase__ ( _a , _a):
SCREAMING_SNAKE_CASE : Any = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
for i in range(len(backbone_config.depths)):
SCREAMING_SNAKE_CASE : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i]):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : str = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
SCREAMING_SNAKE_CASE : Dict = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE : Dict = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE : Any = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : int = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[-dim :]
# fmt: on
def lowerCamelCase__ ( _a , _a):
# transformer decoder self-attention layers
SCREAMING_SNAKE_CASE : List[str] = config.d_model
for i in range(config.decoder_layers):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE : Tuple = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
SCREAMING_SNAKE_CASE : str = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : int = in_proj_weight[:hidden_size, :]
SCREAMING_SNAKE_CASE : str = in_proj_bias[:hidden_size]
SCREAMING_SNAKE_CASE : int = in_proj_weight[
hidden_size : hidden_size * 2, :
]
SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
SCREAMING_SNAKE_CASE : Any = in_proj_weight[-hidden_size:, :]
SCREAMING_SNAKE_CASE : Dict = in_proj_bias[-hidden_size:]
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(_a , stream=_a).raw)
return im
@torch.no_grad()
def lowerCamelCase__ ( _a , _a , _a):
SCREAMING_SNAKE_CASE : int = get_deta_config(_a)
# load original state dict
if model_name == "deta-swin-large":
SCREAMING_SNAKE_CASE : Any = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth")
elif model_name == "deta-swin-large-o365":
SCREAMING_SNAKE_CASE : Any = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth")
else:
raise ValueError(f"Model name {model_name} not supported")
SCREAMING_SNAKE_CASE : List[Any] = torch.load(_a , map_location="cpu")["model"]
# original state dict
for name, param in state_dict.items():
print(_a , param.shape)
# rename keys
SCREAMING_SNAKE_CASE : Optional[Any] = create_rename_keys(_a)
for src, dest in rename_keys:
rename_key(_a , _a , _a)
read_in_swin_q_k_v(_a , config.backbone_config)
read_in_decoder_q_k_v(_a , _a)
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : Optional[int] = val
if "input_proj" in key:
SCREAMING_SNAKE_CASE : Any = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : str = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
SCREAMING_SNAKE_CASE : Any = state_dict.pop(_a)
SCREAMING_SNAKE_CASE : str = val
# finally, create HuggingFace model and load state dict
SCREAMING_SNAKE_CASE : str = DetaForObjectDetection(_a)
model.load_state_dict(_a)
model.eval()
SCREAMING_SNAKE_CASE : str = "cuda" if torch.cuda.is_available() else "cpu"
model.to(_a)
# load image processor
SCREAMING_SNAKE_CASE : int = DetaImageProcessor(format="coco_detection")
# verify our conversion on image
SCREAMING_SNAKE_CASE : List[str] = prepare_img()
SCREAMING_SNAKE_CASE : Union[str, Any] = processor(images=_a , return_tensors="pt")
SCREAMING_SNAKE_CASE : List[Any] = encoding["pixel_values"]
SCREAMING_SNAKE_CASE : Dict = model(pixel_values.to(_a))
# verify logits
print("Logits:" , outputs.logits[0, :3, :3])
print("Boxes:" , outputs.pred_boxes[0, :3, :3])
if model_name == "deta-swin-large":
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
elif model_name == "deta-swin-large-o365":
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_a) , atol=1E-4)
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_a) , atol=1E-4)
print("Everything ok!")
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
Path(_a).mkdir(exist_ok=_a)
model.save_pretrained(_a)
processor.save_pretrained(_a)
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub...")
model.push_to_hub(f"jozhang97/{model_name}")
processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
a_ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 76 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str:
_A : Optional[int] = bp_numa
_A : Dict = bp_numa
_A : Tuple = bp_numa
_A : List[str] = conva_get[:2]
_A : Tuple = conva_get[2]
_A : Optional[int] = size_pa
_A : Optional[Any] = rate_w
_A : Optional[Any] = rate_t
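# weight matrices are initialised in (-0.5, 0.5] and thresholds in (-1, 1]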
_A : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Any = -2 * np.random.rand(self.conva[1]) + 1
_A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
_A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# save model dict with pickle
_A : Dict = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb") as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase)
print(F"Model saved: {save_path}")
@classmethod
def _lowerCamelCase ( cls , __lowerCamelCase) -> Any:
# read saved model
with open(__lowerCamelCase , "rb") as f:
_A : Any = pickle.load(__lowerCamelCase) # noqa: S301
_A : Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
_A : str = model_dic.get("size_pooling1")
_A : List[str] = model_dic.get("num_bp1")
_A : Union[str, Any] = model_dic.get("num_bp2")
_A : List[Any] = model_dic.get("num_bp3")
_A : Dict = model_dic.get("rate_weight")
_A : List[Any] = model_dic.get("rate_thre")
# create model instance
_A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# modify model parameter
_A : List[Any] = model_dic.get("w_conv1")
_A : Union[str, Any] = model_dic.get("wkj")
_A : str = model_dic.get("vji")
_A : List[str] = model_dic.get("thre_conv1")
_A : Optional[Any] = model_dic.get("thre_bp2")
_A : Dict = model_dic.get("thre_bp3")
return conv_ins
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
return 1 / (1 + np.exp(-1 * x))
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
return round(__lowerCamelCase , 3)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]:
# convolution process
_A : Tuple = convs[0]
_A : Union[str, Any] = convs[1]
_A : List[Any] = np.shape(__lowerCamelCase)[0]
# get the data slice of original image data, data_focus
_A : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
_A : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase)
# calculate the feature map of every single kernel, and saved as list of matrix
_A : Optional[Any] = []
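# side length of each square output feature map for this kernel size and stride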
_A : Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__lowerCamelCase):
_A : Optional[int] = []
for i_focus in range(len(__lowerCamelCase)):
_A : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase))
_A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape(
__lowerCamelCase , __lowerCamelCase)
data_featuremap.append(__lowerCamelCase)
# expanding the data slice to one dimension
_A : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase))
_A : Dict = np.asarray(__lowerCamelCase)
return focus_list, data_featuremap
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict:
# pooling process
_A : Optional[Any] = len(featuremaps[0])
_A : str = int(size_map / size_pooling)
_A : Optional[int] = []
for i_map in range(len(__lowerCamelCase)):
_A : int = featuremaps[i_map]
_A : Optional[int] = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase))
_A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase)
featuremap_pooled.append(__lowerCamelCase)
return featuremap_pooled
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
# expanding three-dimensional data into a one-dimensional list
_A : Tuple = []
for i in range(len(__lowerCamelCase)):
_A : Union[str, Any] = np.shape(data[i])
_A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1])
_A : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase)
_A : Optional[Any] = np.asarray(__lowerCamelCase)
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
# expanding a matrix into a one-dimensional list
_A : List[Any] = np.asarray(__lowerCamelCase)
_A : Union[str, Any] = np.shape(__lowerCamelCase)
_A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Dict = []
_A : Any = 0
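# broadcast each pooled gradient back over its pooling window, then scale by the sigmoid derivative out * (1 - out)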
for i_map in range(__lowerCamelCase):
_A : Union[str, Any] = np.ones((size_map, size_map))
for i in range(0 , __lowerCamelCase , __lowerCamelCase):
for j in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : List[Any] = pd_pool[
i_pool
]
_A : Tuple = i_pool + 1
_A : Optional[Any] = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(__lowerCamelCase)
return pd_all
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]:
# model training
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase)))
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase)))
_A : Tuple = 0
_A : Dict = []
_A : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_A : Union[str, Any] = 0
print(F"-------------Learning Time {rp}--------------")
for p in range(len(__lowerCamelCase)):
# print('------------Learning Image: %d--------------'%p)
_A : str = np.asmatrix(datas_train[p])
_A : Union[str, Any] = np.asarray(datas_teach[p])
_A , _A : Any = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = np.shape(__lowerCamelCase)
_A : List[str] = self._expand(__lowerCamelCase)
_A : Tuple = data_bp_input
_A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa
_A : List[Any] = self.sig(__lowerCamelCase)
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa
_A : List[str] = self.sig(__lowerCamelCase)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
_A : int = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Optional[Any] = np.multiply(
np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji)
_A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
_A : Dict = pd_conva_pooled.T.getA().tolist()
_A : Optional[Any] = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_A : int = self._expand_mat(pd_conva_all[k_conv])
_A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_A : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
_A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
_A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_A : Optional[int] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_A : Any = rp + 1
_A : Dict = error_count / patterns
all_mse.append(__lowerCamelCase)
def draw_error():
_A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__lowerCamelCase , "+-")
plt.plot(__lowerCamelCase , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(__lowerCamelCase , alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
# model predict
_A : Union[str, Any] = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase)))
for p in range(len(__lowerCamelCase)):
_A : int = np.asmatrix(datas_test[p])
_A , _A : List[Any] = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : str = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = self._expand(__lowerCamelCase)
_A : List[Any] = data_bp_input
_A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
_A : int = self.sig(__lowerCamelCase)
_A : int = bp_outa * self.wkj.T - self.thre_bpa
_A : Optional[int] = self.sig(__lowerCamelCase)
produce_out.extend(bp_outa.getA().tolist())
_A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out]
return np.asarray(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# return the image data after the convolution and pooling steps so it can be inspected
_A : Optional[int] = np.asmatrix(__lowerCamelCase)
_A , _A : Tuple = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 11 | 0 |
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _a):
lowerCamelCase__ : int = (IPNDMScheduler,)
lowerCamelCase__ : str = (("num_inference_steps", 5_0),)
def _UpperCAmelCase ( self , **a ) -> List[str]:
lowercase__ : int = {'num_train_timesteps': 1_0_0_0}
config.update(**a )
return config
def _UpperCAmelCase ( self , a=0 , **a ) -> Union[str, Any]:
lowercase__ : Tuple = dict(self.forward_default_kwargs )
lowercase__ : List[Any] = kwargs.pop('num_inference_steps' , a )
lowercase__ : Dict = self.dummy_sample
lowercase__ : Union[str, Any] = 0.1 * sample
lowercase__ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ : List[str] = self.get_scheduler_config(**a )
lowercase__ : Union[str, Any] = scheduler_class(**a )
scheduler.set_timesteps(a )
# copy over dummy past residuals
lowercase__ : Optional[int] = dummy_past_residuals[:]
if time_step is None:
lowercase__ : Optional[int] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a )
lowercase__ : int = scheduler_class.from_pretrained(a )
new_scheduler.set_timesteps(a )
# copy over dummy past residuals
lowercase__ : Tuple = dummy_past_residuals[:]
lowercase__ : Optional[Any] = scheduler.step(a , a , a , **a ).prev_sample
lowercase__ : List[Any] = new_scheduler.step(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowercase__ : Tuple = scheduler.step(a , a , a , **a ).prev_sample
lowercase__ : List[str] = new_scheduler.step(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self ) -> Tuple:
pass
def _UpperCAmelCase ( self , a=0 , **a ) -> List[Any]:
lowercase__ : List[str] = dict(self.forward_default_kwargs )
lowercase__ : List[str] = kwargs.pop('num_inference_steps' , a )
lowercase__ : Union[str, Any] = self.dummy_sample
lowercase__ : Tuple = 0.1 * sample
lowercase__ : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowercase__ : str = self.get_scheduler_config()
lowercase__ : Union[str, Any] = scheduler_class(**a )
scheduler.set_timesteps(a )
# copy over dummy past residuals (must be after setting timesteps)
lowercase__ : Optional[Any] = dummy_past_residuals[:]
if time_step is None:
lowercase__ : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(a )
lowercase__ : Optional[Any] = scheduler_class.from_pretrained(a )
# copy over dummy past residuals
new_scheduler.set_timesteps(a )
# copy over dummy past residual (must be after setting timesteps)
lowercase__ : Union[str, Any] = dummy_past_residuals[:]
lowercase__ : Optional[Any] = scheduler.step(a , a , a , **a ).prev_sample
lowercase__ : int = new_scheduler.step(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowercase__ : List[str] = scheduler.step(a , a , a , **a ).prev_sample
lowercase__ : int = new_scheduler.step(a , a , a , **a ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self , **a ) -> Tuple:
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config(**a )
lowercase__ : str = scheduler_class(**a )
lowercase__ : Dict = 1_0
lowercase__ : str = self.dummy_model()
lowercase__ : Optional[Any] = self.dummy_sample_deter
scheduler.set_timesteps(a )
for i, t in enumerate(scheduler.timesteps ):
lowercase__ : Any = model(a , a )
lowercase__ : Union[str, Any] = scheduler.step(a , a , a ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase__ : str = model(a , a )
lowercase__ : Tuple = scheduler.step(a , a , a ).prev_sample
return sample
def _UpperCAmelCase ( self ) -> List[Any]:
lowercase__ : List[Any] = dict(self.forward_default_kwargs )
lowercase__ : Tuple = kwargs.pop('num_inference_steps' , a )
for scheduler_class in self.scheduler_classes:
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**a )
lowercase__ : List[Any] = self.dummy_sample
lowercase__ : int = 0.1 * sample
if num_inference_steps is not None and hasattr(a , 'set_timesteps' ):
scheduler.set_timesteps(a )
elif num_inference_steps is not None and not hasattr(a , 'set_timesteps' ):
lowercase__ : Any = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase__ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowercase__ : Union[str, Any] = dummy_past_residuals[:]
lowercase__ : Any = scheduler.timesteps[5]
lowercase__ : str = scheduler.timesteps[6]
lowercase__ : Optional[int] = scheduler.step(a , a , a , **a ).prev_sample
lowercase__ : Union[str, Any] = scheduler.step(a , a , a , **a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowercase__ : Union[str, Any] = scheduler.step(a , a , a , **a ).prev_sample
lowercase__ : Optional[Any] = scheduler.step(a , a , a , **a ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self ) -> Optional[int]:
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=a , time_step=a )
def _UpperCAmelCase ( self ) -> Any:
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=a , time_step=a )
def _UpperCAmelCase ( self ) -> Dict:
lowercase__ : List[str] = self.full_loop()
lowercase__ : Tuple = torch.mean(torch.abs(a ) )
assert abs(result_mean.item() - 2_5_4_0_5_2_9 ) < 1_0
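The assertions above exercise the save_config / from_pretrained round trip for a diffusers scheduler. A minimal self-contained sketch of the same pattern, assuming diffusers is installed and using DDIMScheduler purely as an example class:

import tempfile
import torch
from diffusers import DDIMScheduler

original = DDIMScheduler(num_train_timesteps=1000)
with tempfile.TemporaryDirectory() as tmpdirname:
    original.save_config(tmpdirname)  # writes scheduler_config.json
    restored = DDIMScheduler.from_pretrained(tmpdirname)

original.set_timesteps(50)
restored.set_timesteps(50)
# the reloaded scheduler should reproduce the exact same timestep schedule
assert torch.equal(original.timesteps, restored.timesteps)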
| 77 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
_A : str = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_A : Tuple = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def _UpperCAmelCase (UpperCamelCase__ : str ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def _UpperCAmelCase ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P("mp" , UpperCamelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _UpperCAmelCase (UpperCamelCase__ : List[str] ):
_A : int = _get_partition_rules()
_A : Optional[int] = _replacement_rules(UpperCamelCase__ )
_A : Optional[int] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_A : List[str] = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
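The helpers above match flattened parameter keys against regex rules to build a pjit partition spec. A standalone sketch of the same sliding-window key matching, with illustrative rule names and no flax dependency:

import re

rules = [
    (("attention", "out_proj", "kernel"), "shard along the model-parallel axis"),
    (("mlp", "c_fc", "bias"), "replicate"),
]

def lookup(flat_key):
    # flat_key is a tuple of path components, e.g. a key from flatten_dict(params)
    for patterns, spec in rules:
        regexes = tuple(re.compile(p + "$") for p in patterns)
        # slide the rule over the key; every component must match in order
        for start in range(len(flat_key) - len(patterns) + 1):
            if all(r.match(k) for r, k in zip(regexes, flat_key[start:])):
                return spec
    return "replicate"  # default for keys no rule matches

print(lookup(("transformer", "h", "0", "attention", "out_proj", "kernel")))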
| 11 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = """openai-gpt"""
__UpperCamelCase = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self :Optional[int] , lowercase_ :Optional[int]=4_04_78 , lowercase_ :List[Any]=5_12 , lowercase_ :List[str]=7_68 , lowercase_ :int=12 , lowercase_ :Dict=12 , lowercase_ :Union[str, Any]="gelu" , lowercase_ :Union[str, Any]=0.1 , lowercase_ :str=0.1 , lowercase_ :List[str]=0.1 , lowercase_ :Optional[Any]=1E-5 , lowercase_ :Optional[int]=0.02 , lowercase_ :Optional[Any]="cls_index" , lowercase_ :List[str]=True , lowercase_ :List[str]=None , lowercase_ :str=True , lowercase_ :int=0.1 , **lowercase_ :List[Any] , ) -> str:
UpperCAmelCase = vocab_size
UpperCAmelCase = n_positions
UpperCAmelCase = n_embd
UpperCAmelCase = n_layer
UpperCAmelCase = n_head
UpperCAmelCase = afn
UpperCAmelCase = resid_pdrop
UpperCAmelCase = embd_pdrop
UpperCAmelCase = attn_pdrop
UpperCAmelCase = layer_norm_epsilon
UpperCAmelCase = initializer_range
UpperCAmelCase = summary_type
UpperCAmelCase = summary_use_proj
UpperCAmelCase = summary_activation
UpperCAmelCase = summary_first_dropout
UpperCAmelCase = summary_proj_to_labels
super().__init__(**lowercase_ )
| 78 |
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : bool = False ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_A : Optional[Any] = f"Expected string as input, found {type(UpperCamelCase__ )}"
raise ValueError(UpperCamelCase__ )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_A : Union[str, Any] = f"Expected boolean as use_pascal parameter, found {type(UpperCamelCase__ )}"
raise ValueError(UpperCamelCase__ )
_A : int = input_str.split("_" )
_A : str = 0 if use_pascal else 1
_A : str = words[start_index:]
_A : Optional[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
_A : Any = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
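The function above splits a snake_case identifier on underscores and capitalises each word, keeping the first word lower-cased unless Pascal case is requested. A standalone sketch of the same transformation (to_camel_case is a hypothetical name):

def to_camel_case(text: str, use_pascal: bool = False) -> str:
    words = text.split("_")
    start = 0 if use_pascal else 1
    head = "" if use_pascal else words[0]
    return head + "".join(word[0].upper() + word[1:] for word in words[start:])

assert to_camel_case("some_random_string") == "someRandomString"
assert to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"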
| 11 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def __lowercase ( __lowercase ) -> Union[str, Any]:
'''simple docstring'''
_A = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_A = [144, 192, 240]
_A = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_A = [96, 120, 144]
_A = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_A = [64, 80, 96]
_A = [16, 16, 24, 48, 64, 80, 320]
_A = 0.05
_A = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
_A = 512
_A = 16
_A = 21
_A = "pascal-voc-id2label.json"
else:
_A = 1000
_A = "imagenet-1k-id2label.json"
_A = "huggingface/label-files"
_A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_A = {int(__lowercase ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
return config
def __lowercase ( __lowercase , __lowercase=False ) -> int:
'''simple docstring'''
for i in range(1 , 6 ):
if F'''layer_{i}.''' in name:
_A = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
_A = name.replace("conv_1." , "conv_stem." )
if ".block." in name:
_A = name.replace(".block." , "." )
if "exp_1x1" in name:
_A = name.replace("exp_1x1" , "expand_1x1" )
if "red_1x1" in name:
_A = name.replace("red_1x1" , "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
_A = name.replace(".local_rep.conv_3x3." , ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
_A = name.replace(".local_rep.conv_1x1." , ".conv_1x1." )
if ".norm." in name:
_A = name.replace(".norm." , ".normalization." )
if ".conv." in name:
_A = name.replace(".conv." , ".convolution." )
if ".conv_proj." in name:
_A = name.replace(".conv_proj." , ".conv_projection." )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
_A = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
_A = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' )
if "expand_1x1" in name:
_A = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
_A = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
_A = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" )
for i in range(2 , 5 ):
if F'''.global_rep.{i}.weight''' in name:
_A = name.replace(F'''.global_rep.{i}.weight''' , ".layernorm.weight" )
if F'''.global_rep.{i}.bias''' in name:
_A = name.replace(F'''.global_rep.{i}.bias''' , ".layernorm.bias" )
if ".global_rep." in name:
_A = name.replace(".global_rep." , ".transformer." )
if ".pre_norm_mha.0." in name:
_A = name.replace(".pre_norm_mha.0." , ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
_A = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
_A = name.replace(".pre_norm_ffn.0." , ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
_A = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
_A = name.replace(".pre_norm_ffn.4." , ".output.dense." )
if ".transformer." in name:
_A = name.replace(".transformer." , ".transformer.layer." )
if ".aspp_layer." in name:
_A = name.replace(".aspp_layer." , "." )
if ".aspp_pool." in name:
_A = name.replace(".aspp_pool." , "." )
if "seg_head." in name:
_A = name.replace("seg_head." , "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
_A = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." )
if "classifier.fc." in name:
_A = name.replace("classifier.fc." , "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
_A = "mobilevit." + name
return name
def __lowercase ( __lowercase , __lowercase , __lowercase=False ) -> Union[str, Any]:
'''simple docstring'''
if base_model:
_A = ""
else:
_A = "mobilevit."
for key in orig_state_dict.copy().keys():
_A = orig_state_dict.pop(__lowercase )
if key[:8] == "encoder.":
_A = key[8:]
if "qkv" in key:
_A = key.split("." )
_A = int(key_split[0][6:] ) - 1
_A = int(key_split[3] )
_A = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' )
_A = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_A = (
F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
_A = val[:dim, :]
_A = val[dim : dim * 2, :]
_A = val[-dim:, :]
else:
_A = val[:dim]
_A = val[dim : dim * 2]
_A = val[-dim:]
else:
_A = val
return orig_state_dict
def __lowercase ( ) -> Optional[Any]:
'''simple docstring'''
_A = "http://images.cocodataset.org/val2017/000000039769.jpg"
_A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
@torch.no_grad()
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase=False ) -> List[str]:
'''simple docstring'''
_A = get_mobilevit_config(__lowercase )
# load original state_dict
_A = torch.load(__lowercase , map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
_A = MobileViTForSemanticSegmentation(__lowercase ).eval()
else:
_A = MobileViTForImageClassification(__lowercase ).eval()
_A = convert_state_dict(__lowercase , __lowercase )
model.load_state_dict(__lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
_A = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_A = image_processor(images=prepare_img() , return_tensors="pt" )
_A = model(**__lowercase )
_A = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_A = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_A = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_A = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3] , __lowercase , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
_A = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
_A = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
_A = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3] , __lowercase , atol=1e-4 )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowercase )
if push_to_hub:
_A = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
_A = model_mapping[mobilevit_name]
image_processor.push_to_hub(__lowercase , organization="apple" )
model.push_to_hub(__lowercase , organization="apple" )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase_ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
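For reference, the __main__ block above wires four CLI flags into convert_movilevit_checkpoint; an equivalent direct call from Python would look like the sketch below, where the checkpoint path and output directory are placeholders:

# Roughly equivalent to:
#   python convert_mobilevit.py --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt --pytorch_dump_folder_path ./mobilevit-small-hf
convert_movilevit_checkpoint(
    "mobilevit_s",           # one of the names listed in the --mobilevit_name help text
    "./mobilevit_s.pt",      # original state dict (placeholder path)
    "./mobilevit-small-hf",  # where the converted Hugging Face model is written
    False,                   # push_to_hub
)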
| 79 |
from __future__ import annotations
def _UpperCAmelCase (UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ):
_A : Dict = list(range(len(UpperCamelCase__ ) ) )
_A : Any = [v / w for v, w in zip(UpperCamelCase__ , UpperCamelCase__ )]
index.sort(key=lambda UpperCamelCase__ : ratio[i] , reverse=UpperCamelCase__ )
_A : float = 0
_A : list[float] = [0] * len(UpperCamelCase__ )
for i in index:
if weight[i] <= capacity:
_A : Union[str, Any] = 1
max_value += value[i]
capacity -= weight[i]
else:
_A : Optional[Any] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
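The routine above is the classic greedy fractional knapsack: sort items by value-to-weight ratio, take whole items while they fit, then take a fraction of the next one. A self-contained sketch with an illustrative worked instance:

def fractional_knapsack(value, weight, capacity):
    order = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    fractions = [0.0] * len(value)
    total = 0.0
    for i in order:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            total += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            total += value[i] * fractions[i]
            break
    return total, fractions

# values 60/100/120, weights 10/20/30, capacity 50 -> take items 1 and 2 whole,
# two thirds of item 3, for a best value of 240
best, taken = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
assert abs(best - 240.0) < 1e-9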
| 11 | 0 |
'''simple docstring'''
from math import factorial
def _UpperCamelCase ( __A = 20 ) -> int:
'''simple docstring'''
UpperCamelCase__ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
UpperCamelCase__ = n // 2
return int(factorial(__A ) / (factorial(__A ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
a__ : List[Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
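The function above counts monotone lattice paths across an n x n grid as the central binomial coefficient C(2n, n). A small cross-check using exact integer arithmetic instead of float division:

from math import comb

def lattice_paths(n: int = 20) -> int:
    # number of right/down paths across an n x n grid is C(2n, n)
    return comb(2 * n, n)

assert lattice_paths(2) == 6
assert lattice_paths(20) == 137846528820  # the well-known 20x20 answer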
| 80 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase)
| 11 | 0 |
"""simple docstring"""
def _A ( lowercase , lowercase ):
"""simple docstring"""
return "\n".join(
f'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=1_0))
 | 81 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]:
_A : int = parent
_A : Optional[Any] = batch_size
_A : str = image_size
_A : Tuple = patch_size
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : Any = num_heads
_A : Any = window_size
_A : int = mlp_ratio
_A : Any = qkv_bias
_A : Union[str, Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Dict = drop_path_rate
_A : List[Any] = hidden_act
_A : Any = use_absolute_embeddings
_A : Optional[int] = patch_norm
_A : Tuple = layer_norm_eps
_A : List[str] = initializer_range
_A : Optional[int] = is_training
_A : Optional[Any] = scope
_A : Optional[int] = use_labels
_A : Dict = type_sequence_label_size
_A : str = encoder_stride
_A : Optional[int] = out_features
_A : Optional[int] = out_indices
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = MaskFormerSwinModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : int = model(__lowerCamelCase)
_A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : Dict = model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4])
# verify ValueError
with self.parent.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = ["stem"]
_A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = MaskFormerSwinModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self) -> str:
return
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
def _lowerCamelCase ( self) -> Optional[int]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear))
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(__lowerCamelCase)
_A : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Any = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_A : Tuple = outputs.hidden_states
_A : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
# Swin has a different seq_length
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = 3
_A : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__lowerCamelCase):
_A : Optional[int] = 0
return t
def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}):
with torch.no_grad():
_A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase)
_A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple()
def recursive_check(__lowerCamelCase , __lowerCamelCase):
if isinstance(__lowerCamelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values()):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has"
F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}."
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase)
for model_class in self.all_model_classes:
_A : List[Any] = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
_A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = MaskFormerSwinModelTester(self)
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
_A : Optional[Any] = backbone_class(__lowerCamelCase)
backbone.to(__lowerCamelCase)
backbone.eval()
_A : List[Any] = backbone(**__lowerCamelCase)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __lowerCamelCase)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
_A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase)
self.assertIsNotNone(outputs.hidden_states)
self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_A , _A , _A : List[str] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
_A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase)
self.assertIsNotNone(outputs.attentions)
| 11 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
A__ = logging.get_logger(__name__)
A__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
A__ = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
A__ = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
A__ = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
A__ = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_12,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_12,
}
A__ = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_12,
"""facebook/dpr-question_encoder-multiset-base""": 5_12,
}
A__ = {
"""facebook/dpr-reader-single-nq-base""": 5_12,
"""facebook/dpr-reader-multiset-base""": 5_12,
}
A__ = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
A__ = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
A__ = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __lowerCAmelCase ( lowerCamelCase__ ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __lowerCAmelCase ( lowerCamelCase__ ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
A__ = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
A__ = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
A__ = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) into a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
        The passage titles to be encoded. This can be a string or a list of strings if there are several passages.
    texts (`str` or `List[str]`):
        The passage texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
            is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(lowerCamelCase__ )
class __lowerCAmelCase :
def __call__( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = False , _snake_case = False , _snake_case = None , _snake_case = None , _snake_case = None , **_snake_case , ):
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
elif titles is None or texts is None:
_lowerCAmelCase = titles if texts is None else texts
return super().__call__(
_snake_case , _snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , return_tensors=_snake_case , return_attention_mask=_snake_case , **_snake_case , )
_lowerCAmelCase = titles if not isinstance(_snake_case , _snake_case ) else [titles]
_lowerCAmelCase = texts if not isinstance(_snake_case , _snake_case ) else [texts]
_lowerCAmelCase = len(_snake_case )
_lowerCAmelCase = questions if not isinstance(_snake_case , _snake_case ) else [questions] * n_passages
if len(_snake_case ) != len(_snake_case ):
raise ValueError(
                F'There should be as many titles as texts but got {len(_snake_case )} titles and {len(_snake_case )} texts.' )
_lowerCAmelCase = super().__call__(_snake_case , _snake_case , padding=_snake_case , truncation=_snake_case )["""input_ids"""]
_lowerCAmelCase = super().__call__(_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case )["""input_ids"""]
_lowerCAmelCase = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_snake_case , _snake_case )
]
}
if return_attention_mask is not False:
_lowerCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase = attention_mask
return self.pad(_snake_case , padding=_snake_case , max_length=_snake_case , return_tensors=_snake_case )
def snake_case ( self , _snake_case , _snake_case , _snake_case = 16 , _snake_case = 64 , _snake_case = 4 , ):
"""simple docstring"""
_lowerCAmelCase = reader_input["""input_ids"""]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = reader_output[:3]
_lowerCAmelCase = len(_snake_case )
_lowerCAmelCase = sorted(range(_snake_case ) , reverse=_snake_case , key=relevance_logits.__getitem__ )
_lowerCAmelCase = []
for doc_id in sorted_docs:
_lowerCAmelCase = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase = len(_snake_case )
_lowerCAmelCase = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_snake_case , top_spans=_snake_case , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_snake_case , start_index=_snake_case , end_index=_snake_case , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_snake_case ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def snake_case ( self , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
_lowerCAmelCase = []
for start_index, start_score in enumerate(_snake_case ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCAmelCase = sorted(_snake_case , key=lambda _snake_case : x[1] , reverse=_snake_case )
_lowerCAmelCase = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'Wrong span indices: [{start_index}:{end_index}]' )
_lowerCAmelCase = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'Span is too long: {length} > {max_answer_length}' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_snake_case ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(lowerCamelCase__ )
class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = READER_PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = READER_PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = ['''input_ids''', '''attention_mask''']
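A minimal usage sketch for the reader tokenizer above, assuming the public facebook/dpr-reader-single-nq-base checkpoint and the matching DPRReader model from transformers; the question, title and passage strings are illustrative:

from transformers import DPRReader, DPRReaderTokenizer

tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")

encoded = tokenizer(
    questions=["What is the capital of France?"],
    titles=["Paris"],
    texts=["Paris is the capital and most populous city of France."],
    return_tensors="pt",
)
outputs = model(**encoded)
# rank passages by relevance_logits and extract the highest scoring answer span
best_spans = tokenizer.decode_best_spans(encoded, outputs, num_spans=1)
print(best_spans[0].text)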
| 82 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 0 |
'''simple docstring'''
def A__ ( UpperCAmelCase_ ):
if upper_limit < 0:
raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
_UpperCamelCase : Tuple = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
_UpperCamelCase : Optional[Any] = 1
if upper_limit > 0:
_UpperCamelCase : Optional[Any] = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(UpperCAmelCase_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
snake_case_ : Optional[Any] = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
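The dynamic programme above builds the Catalan numbers from the convolution recurrence C(i) = sum_j C(j) * C(i - j - 1). As a cross-check, the closed form C(n) = binom(2n, n) / (n + 1) yields the same sequence:

from math import comb

def catalan(n: int) -> int:
    return comb(2 * n, n) // (n + 1)

# first few Catalan numbers
assert [catalan(i) for i in range(7)] == [1, 1, 2, 5, 14, 42, 132]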
| 83 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ):
_A , _A : Any = image.size
_A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0
_A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 )
_A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ )
return 2.0 * image - 1.0
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase)
@torch.no_grad()
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Tuple = 1
elif isinstance(__lowerCamelCase , torch.Tensor):
_A : Union[str, Any] = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}")
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Union[str, Any] = preprocess(__lowerCamelCase)
_A , _A : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
_A : str = next(self.unet.parameters()).dtype
_A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase)
_A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device)
_A : Any = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_A : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_A : Optional[int] = {}
if accepts_eta:
_A : List[Any] = eta
for t in self.progress_bar(__lowerCamelCase):
# concat latents and low resolution image in the channel dimension.
_A : List[Any] = torch.cat([latents, image] , dim=1)
_A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
# predict the noise residual
_A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample
# compute the previous noisy sample x_t -> x_t-1
_A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample
# decode the image latents with the VQVAE
_A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample
_A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0)
_A : Tuple = image / 2 + 0.5
_A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_A : Optional[int] = self.numpy_to_pil(__lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase)
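A hedged end-to-end sketch of driving this pipeline through diffusers; the checkpoint id and the input image path are assumptions, not taken from the code above:

import torch
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipeline = LDMSuperResolutionPipeline.from_pretrained(
    "CompVis/ldm-super-resolution-4x-openimages"
).to(device)

low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))
# the __call__ above normalises the image, denoises latents conditioned on it,
# then decodes the result with the VQ-VAE
upscaled = pipeline(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")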
| 11 | 0 |
"""simple docstring"""
from PIL import Image
def _snake_case ( lowercase__ : Image , lowercase__ : float ) -> Image:
'''simple docstring'''
def brightness(lowercase__ : int ) -> float:
        return 1_2_8 + level + (lowercase__ - 1_2_8)  # shift each pixel value by level
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(lowercase__ )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
__UpperCAmelCase = change_brightness(img, 1_00)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 84 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VQModel
__SCREAMING_SNAKE_CASE = "sample"
@property
def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]:
_A : Optional[int] = 4
_A : Tuple = 3
_A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase)
return {"sample": image}
@property
def _lowerCamelCase ( self) -> int:
return (3, 3_2, 3_2)
@property
def _lowerCamelCase ( self) -> List[Any]:
return (3, 3_2, 3_2)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[Any] = {
"block_out_channels": [3_2, 6_4],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_A : int = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> Any:
pass
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(__lowerCamelCase)
_A : str = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(__lowerCamelCase).eval()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
_A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
_A : Optional[int] = image.to(__lowerCamelCase)
with torch.no_grad():
_A : List[str] = model(__lowerCamelCase).sample
_A : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3])
# fmt: on
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
| 11 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _snake_case :
def __init__( self , a__ , a__=2 , a__=True , a__=False , a__=10 , a__=3 , a__=32 * 4 , a__=32 * 6 , a__=4 , a__=32 , ) -> List[Any]:
'''simple docstring'''
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = is_training
snake_case_ = use_auxiliary_loss
snake_case_ = num_queries
snake_case_ = num_channels
snake_case_ = min_size
snake_case_ = max_size
snake_case_ = num_labels
snake_case_ = mask_feature_size
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
a__ )
snake_case_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=a__ )
snake_case_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=a__ ) > 0.5
).float()
snake_case_ = (torch.rand((self.batch_size, self.num_labels) , device=a__ ) > 0.5).long()
snake_case_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.prepare_config_and_inputs()
snake_case_ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self , a__ , a__ ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = output.encoder_hidden_states
snake_case_ = output.pixel_decoder_hidden_states
snake_case_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(a__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a__ ) , config.decoder_config.decoder_layers )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__=False ) -> int:
'''simple docstring'''
with torch.no_grad():
snake_case_ = MaskFormerModel(config=a__ )
model.to(a__ )
model.eval()
snake_case_ = model(pixel_values=a__ , pixel_mask=a__ )
snake_case_ = model(a__ , output_hidden_states=a__ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(a__ , a__ )
def lowerCAmelCase__ ( self , a__ , a__ , a__ , a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = MaskFormerForInstanceSegmentation(config=a__ )
model.to(a__ )
model.eval()
def comm_check_on_output(a__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case_ = model(pixel_values=a__ , pixel_mask=a__ )
snake_case_ = model(a__ )
comm_check_on_output(a__ )
snake_case_ = model(
pixel_values=a__ , pixel_mask=a__ , mask_labels=a__ , class_labels=a__ )
comm_check_on_output(a__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _snake_case ( lowercase_ , lowercase_ , unittest.TestCase ):
lowerCAmelCase_ : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowerCAmelCase_ : Dict = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Optional[Any] = False
lowerCAmelCase_ : List[str] = False
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = MaskFormerModelTester(self )
snake_case_ = ConfigTester(self , config_class=a__ , has_text_modality=a__ )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(a__ , **a__ , output_hidden_states=a__ )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*a__ )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(a__ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
@slow
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
snake_case_ = MaskFormerModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ = (self.model_tester.min_size,) * 2
snake_case_ = {
"pixel_values": torch.randn((2, 3, *size) , device=a__ ),
"mask_labels": torch.randn((2, 10, *size) , device=a__ ),
"class_labels": torch.zeros(2 , 10 , device=a__ ).long(),
}
snake_case_ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(a__ )
snake_case_ = model(**a__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(a__ , **a__ , output_hidden_states=a__ )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(a__ ).to(a__ )
snake_case_ = model(**a__ , output_attentions=a__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
snake_case_ = self.all_model_classes[1]
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = model_class(a__ )
model.to(a__ )
model.train()
snake_case_ = model(a__ , mask_labels=a__ , class_labels=a__ ).loss
loss.backward()
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = self.all_model_classes[1]
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs()
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(a__ )
model.to(a__ )
model.train()
snake_case_ = model(a__ , mask_labels=a__ , class_labels=a__ )
snake_case_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
snake_case_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=a__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_SCREAMING_SNAKE_CASE : int = 1e-4
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class _snake_case ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(a__ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(a__ , return_tensors="pt" ).to(a__ )
snake_case_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a__ , (1, 3, 800, 1_088) )
with torch.no_grad():
snake_case_ = model(**a__ )
snake_case_ = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(a__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , a__ , atol=a__ ) )
snake_case_ = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(a__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , a__ , atol=a__ ) )
snake_case_ = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(a__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , a__ , atol=a__ ) )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(a__ )
.eval()
)
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(a__ , return_tensors="pt" ).to(a__ )
snake_case_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a__ , (1, 3, 800, 1_088) )
with torch.no_grad():
snake_case_ = model(**a__ )
# masks_queries_logits
snake_case_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case_ = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
snake_case_ = torch.tensor(a__ ).to(a__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a__ , atol=a__ ) )
# class_queries_logits
snake_case_ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case_ = torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(a__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a__ , atol=a__ ) )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(a__ )
.eval()
)
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(a__ , return_tensors="pt" ).to(a__ )
snake_case_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a__ , (1, 3, 800, 1_088) )
with torch.no_grad():
snake_case_ = model(**a__ )
# masks_queries_logits
snake_case_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case_ = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
snake_case_ = torch.tensor(a__ ).to(a__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a__ , atol=a__ ) )
# class_queries_logits
snake_case_ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case_ = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(a__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a__ , atol=a__ ) )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(a__ )
.eval()
)
snake_case_ = self.default_image_processor
snake_case_ = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
snake_case_ = inputs["pixel_values"].to(a__ )
snake_case_ = [el.to(a__ ) for el in inputs["mask_labels"]]
snake_case_ = [el.to(a__ ) for el in inputs["class_labels"]]
with torch.no_grad():
snake_case_ = model(**a__ )
self.assertTrue(outputs.loss is not None )
| 85 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
        # Mask token behaves like a normal word, i.e. includes the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Any = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
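        # Illustrative note (not part of the original class): with the default src_lang
        # "en_XX" the template above renders a single sequence as "$A </s> en_XX", i.e.
        # MBart source text gets no prefix tokens and is suffixed with the EOS token
        # followed by the language code.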
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list):
    if not elements_list:
        raise Exception('The Elements List is empty')
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node):
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    head = make_linked_list([14, 52, 14, 12, 43])
    print('Linked List:')
    print(head)
    print('Elements in Reverse:')
    print_reverse(head)


if __name__ == "__main__":
    main()
| 86 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
lowerCAmelCase__ = '</w>'
lowerCAmelCase__ = '@@ '
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ):
_A : Optional[int] = set()
_A : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A : List[Any] = char
return pairs
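# Illustrative example (not part of the original module): for the symbol tuple
# ("l", "o", "w", "</w>") the helper above returns the adjacent-pair set
# {("l", "o"), ("o", "w"), ("w", "</w>")}, which is what the BPE merge loop in
# the tokenizer's `bpe()` method ranks against `self.bpe_ranks` to pick the next merge.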
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_A : Dict = do_lower_case
with open(__lowerCamelCase , encoding="utf-8") as vocab_handle:
_A : Optional[int] = json.load(__lowerCamelCase)
_A : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_A : Optional[Any] = None
_A : Tuple = None
else:
with open(__lowerCamelCase , encoding="utf-8") as merges_handle:
_A : Optional[int] = merges_handle.read().split("\n")[:-1]
_A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges]
_A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : List[Any] = {}
@property
def _lowerCamelCase ( self) -> int:
return len(self.decoder)
def _lowerCamelCase ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A : int = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[int] = bigram
_A : int = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : str = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : str = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : List[str] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : List[Any] = get_pairs(__lowerCamelCase)
_A : Tuple = " ".join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_A : List[str] = "\n" + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_A : int = word.replace(__lowerCamelCase , "")
_A : int = word.replace(" " , __lowerCamelCase)
_A : Union[str, Any] = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : Optional[int] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return (vocab_file, merges_file)
| 11 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase = 256
class snake_case_ ( __A ):
__A : str = ["melgan"]
def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training.
lowercase__ : str = 4.0 # Largest value for most examples
lowercase__ : Any = 1_28
self.register_modules(
notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )
def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]:
lowercase__ , lowercase__ : int = output_range
if clip:
lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
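    # Illustrative note (not part of the original pipeline): this is a plain min-max
    # rescale, e.g. with output_range=(-1.0, 1.0) a feature equal to self.min_value
    # (log 1e-5) maps to -1.0 and one equal to self.max_value (4.0) maps to 1.0,
    # the range the pipeline uses before encoding the previous mel chunk below.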
def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]:
lowercase__ , lowercase__ : Tuple = input_range
lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs
# Scale to [0, 1].
lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]:
lowercase__ : Optional[Any] = input_tokens > 0
lowercase__ , lowercase__ : int = self.notes_encoder(
encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ )
lowercase__ , lowercase__ : List[Any] = self.continuous_encoder(
encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple:
lowercase__ : Union[str, Any] = noise_time
if not torch.is_tensor(lowercase_ ):
lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase__ : str = self.decoder(
encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ )
return logits
@torch.no_grad()
def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(lowercase_ )}.''' )
lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
for i, encoder_input_tokens in enumerate(lowercase_ ):
if i == 0:
lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase__ : str = ones
lowercase__ : str = self.scale_features(
lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ )
lowercase__ : str = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase__ : List[str] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(lowercase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__ : Optional[int] = self.decode(
encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] )
lowercase__ : List[str] = mel[:1]
lowercase__ : Optional[int] = mel.cpu().float().numpy()
lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ )
logger.info("Generated segment" , lowercase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase__ : Dict = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=lowercase_ )
| 87 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "vit_mae"
def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int:
super().__init__(**__lowerCamelCase)
_A : int = hidden_size
_A : List[str] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : Optional[int] = hidden_act
_A : List[Any] = hidden_dropout_prob
_A : List[Any] = attention_probs_dropout_prob
_A : Union[str, Any] = initializer_range
_A : str = layer_norm_eps
_A : Any = image_size
_A : int = patch_size
_A : int = num_channels
_A : Dict = qkv_bias
_A : Tuple = decoder_num_attention_heads
_A : Tuple = decoder_hidden_size
_A : List[str] = decoder_num_hidden_layers
_A : Optional[Any] = decoder_intermediate_size
_A : List[str] = mask_ratio
_A : Union[str, Any] = norm_pix_loss
| 11 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __get__( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any=None ) -> str:
"""simple docstring"""
if obj is None:
return self
if self.fget is None:
raise AttributeError("""unreadable attribute""" )
__magic_name__ = """__cached_""" + self.fget.__name__
__magic_name__ = getattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if cached is None:
__magic_name__ = self.fget(UpperCamelCase__ )
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return cached
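# Usage sketch (illustrative only; `cached_property` and the attribute name are
# assumptions, since the descriptor above appears here under an obfuscated alias):
#
#   class Config:
#       @cached_property
#       def heavy(self):
#           return expensive_computation()
#
# The first access on an instance runs the getter and stores the result under
# "__cached_heavy"; later accesses on the same instance return the stored value.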
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
def a__ ( A_ ):
'''simple docstring'''
if is_torch_fx_proxy(A_ ):
return True
if is_torch_available():
import torch
if isinstance(A_, torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(A_, tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(A_, (jnp.ndarray, Tracer) ):
return True
return isinstance(A_, np.ndarray )
def a__ ( A_ ):
'''simple docstring'''
return isinstance(A_, np.ndarray )
def a__ ( A_ ):
'''simple docstring'''
return _is_numpy(A_ )
def a__ ( A_ ):
'''simple docstring'''
import torch
return isinstance(A_, torch.Tensor )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch(A_ )
def a__ ( A_ ):
'''simple docstring'''
import torch
return isinstance(A_, torch.device )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_device(A_ )
def a__ ( A_ ):
'''simple docstring'''
import torch
if isinstance(A_, A_ ):
if hasattr(A_, A_ ):
__magic_name__ = getattr(A_, A_ )
else:
return False
return isinstance(A_, torch.dtype )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_torch_available() else _is_torch_dtype(A_ )
def a__ ( A_ ):
'''simple docstring'''
import tensorflow as tf
return isinstance(A_, tf.Tensor )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tensorflow(A_ )
def a__ ( A_ ):
'''simple docstring'''
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(A_, """is_symbolic_tensor""" ):
return tf.is_symbolic_tensor(A_ )
return type(A_ ) == tf.Tensor
def a__ ( A_ ):
'''simple docstring'''
return False if not is_tf_available() else _is_tf_symbolic_tensor(A_ )
def a__ ( A_ ):
'''simple docstring'''
import jax.numpy as jnp # noqa: F811
return isinstance(A_, jnp.ndarray )
def a__ ( A_ ):
'''simple docstring'''
return False if not is_flax_available() else _is_jax(A_ )
def a__ ( A_ ):
'''simple docstring'''
if isinstance(A_, (dict, UserDict) ):
return {k: to_py_obj(A_ ) for k, v in obj.items()}
elif isinstance(A_, (list, tuple) ):
return [to_py_obj(A_ ) for o in obj]
elif is_tf_tensor(A_ ):
return obj.numpy().tolist()
elif is_torch_tensor(A_ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(A_ ):
return np.asarray(A_ ).tolist()
elif isinstance(A_, (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a__ ( A_ ):
'''simple docstring'''
if isinstance(A_, (dict, UserDict) ):
return {k: to_numpy(A_ ) for k, v in obj.items()}
elif isinstance(A_, (list, tuple) ):
return np.array(A_ )
elif is_tf_tensor(A_ ):
return obj.numpy()
elif is_torch_tensor(A_ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(A_ ):
return np.asarray(A_ )
else:
return obj
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def _lowercase ( self : List[Any] ) -> str:
"""simple docstring"""
__magic_name__ = fields(self )
# Safety and consistency checks
if not len(UpperCamelCase__ ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
__magic_name__ = getattr(self , class_fields[0].name )
__magic_name__ = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(UpperCamelCase__ ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__magic_name__ = first_field.items()
__magic_name__ = True
else:
try:
__magic_name__ = iter(UpperCamelCase__ )
__magic_name__ = True
except TypeError:
__magic_name__ = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(UpperCamelCase__ ):
if (
not isinstance(UpperCamelCase__ , (list, tuple) )
or not len(UpperCamelCase__ ) == 2
or not isinstance(element[0] , UpperCamelCase__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__magic_name__ = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__magic_name__ = element[1]
elif first_field is not None:
__magic_name__ = first_field
else:
for field in class_fields:
__magic_name__ = getattr(self , field.name )
if v is not None:
__magic_name__ = v
def __delitem__( self : Any , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Any ) -> Optional[Any]:
"""simple docstring"""
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def _lowercase ( self : Optional[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] ) -> str:
"""simple docstring"""
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def _lowercase ( self : Any , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def _lowercase ( self : List[str] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : Any , UpperCamelCase__ : Optional[Any] ) -> Any:
"""simple docstring"""
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__magic_name__ = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Optional[Any]:
"""simple docstring"""
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(UpperCamelCase__ , UpperCamelCase__ )
super().__setattr__(UpperCamelCase__ , UpperCamelCase__ )
def __setitem__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int ) -> int:
"""simple docstring"""
super().__setitem__(UpperCamelCase__ , UpperCamelCase__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : str ) -> Tuple[Any]:
"""simple docstring"""
return tuple(self[k] for k in self.keys() )
class UpperCAmelCase_ ( _A , _A ):
'''simple docstring'''
@classmethod
def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """longest"""
a__ = """max_length"""
a__ = """do_not_pad"""
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """pt"""
a__ = """tf"""
a__ = """np"""
a__ = """jax"""
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : List[ContextManager] ) -> Any:
"""simple docstring"""
__magic_name__ = context_managers
__magic_name__ = ExitStack()
def __enter__( self : Optional[int] ) -> int:
"""simple docstring"""
for context_manager in self.context_managers:
self.stack.enter_context(UpperCamelCase__ )
def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[int] ) -> str:
"""simple docstring"""
self.stack.__exit__(*UpperCamelCase__ , **UpperCamelCase__ )
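# Usage sketch (illustrative only; the class above is shown under an obfuscated name,
# so `ContextManagers` is an assumed alias):
#
#   with ContextManagers([torch.no_grad(), open("log.txt", "w")]):
#       ...
#
# Each manager is entered in order through the internal ExitStack and all of them are
# exited together when the block ends.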
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = infer_framework(A_ )
if framework == "tf":
__magic_name__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__ = inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__ = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = model_class.__name__
__magic_name__ = infer_framework(A_ )
if framework == "tf":
__magic_name__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__ = inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__ = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a__ ( A_, A_ = "", A_ = "." ):
'''simple docstring'''
def _flatten_dict(A_, A_="", A_="." ):
for k, v in d.items():
__magic_name__ = str(A_ ) + delimiter + str(A_ ) if parent_key else k
if v and isinstance(A_, A_ ):
yield from flatten_dict(A_, A_, delimiter=A_ ).items()
else:
yield key, v
return dict(_flatten_dict(A_, A_, A_ ) )
@contextmanager
def a__ ( A_, A_ = False ):
'''simple docstring'''
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a__ ( A_, A_=None ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.transpose(A_, axes=A_ )
elif is_torch_tensor(A_ ):
return array.T if axes is None else array.permute(*A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.transpose(A_, perm=A_ )
elif is_jax_tensor(A_ ):
return jnp.transpose(A_, axes=A_ )
else:
raise ValueError(f'''Type not supported for transpose: {type(A_ )}.''' )
def a__ ( A_, A_ ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.reshape(A_, A_ )
elif is_torch_tensor(A_ ):
return array.reshape(*A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.reshape(A_, A_ )
elif is_jax_tensor(A_ ):
return jnp.reshape(A_, A_ )
else:
raise ValueError(f'''Type not supported for reshape: {type(A_ )}.''' )
def a__ ( A_, A_=None ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.squeeze(A_, axis=A_ )
elif is_torch_tensor(A_ ):
return array.squeeze() if axis is None else array.squeeze(dim=A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.squeeze(A_, axis=A_ )
elif is_jax_tensor(A_ ):
return jnp.squeeze(A_, axis=A_ )
else:
raise ValueError(f'''Type not supported for squeeze: {type(A_ )}.''' )
def a__ ( A_, A_ ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.expand_dims(A_, A_ )
elif is_torch_tensor(A_ ):
return array.unsqueeze(dim=A_ )
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.expand_dims(A_, axis=A_ )
elif is_jax_tensor(A_ ):
return jnp.expand_dims(A_, axis=A_ )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(A_ )}.''' )
def a__ ( A_ ):
'''simple docstring'''
if is_numpy_array(A_ ):
return np.size(A_ )
elif is_torch_tensor(A_ ):
return array.numel()
elif is_tf_tensor(A_ ):
import tensorflow as tf
return tf.size(A_ )
elif is_jax_tensor(A_ ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(A_ )}.''' )
def a__ ( A_, A_ ):
'''simple docstring'''
for key, value in auto_map.items():
if isinstance(A_, (tuple, list) ):
__magic_name__ = [f'''{repo_id}--{v}''' if (v is not None and """--""" not in v) else v for v in value]
elif value is not None and "--" not in value:
__magic_name__ = f'''{repo_id}--{value}'''
return auto_map
def a__ ( A_ ):
'''simple docstring'''
for base_class in inspect.getmro(A_ ):
__magic_name__ = base_class.__module__
__magic_name__ = base_class.__name__
if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith("""torch""" ) or name == "PreTrainedModel":
return "pt"
elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'''Could not infer framework from class {model_class}.''' )
| 88 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Tuple = 'blip_2_vision_model'
def __init__( self : List[Any] ,_UpperCAmelCase : List[Any]=1408 ,_UpperCAmelCase : Optional[int]=6144 ,_UpperCAmelCase : List[str]=39 ,_UpperCAmelCase : Dict=16 ,_UpperCAmelCase : Optional[Any]=224 ,_UpperCAmelCase : Any=14 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[int]=0.0_00_01 ,_UpperCAmelCase : Tuple=0.0 ,_UpperCAmelCase : Union[str, Any]=1E-10 ,_UpperCAmelCase : Any=True ,**_UpperCAmelCase : Dict ,):
super().__init__(**_UpperCAmelCase )
_a : int = hidden_size
_a : List[str] = intermediate_size
_a : List[Any] = num_hidden_layers
_a : Optional[int] = num_attention_heads
_a : Tuple = patch_size
_a : List[str] = image_size
_a : int = initializer_range
_a : Union[str, Any] = attention_dropout
_a : Union[str, Any] = layer_norm_eps
_a : List[str] = hidden_act
_a : Any = qkv_bias
@classmethod
def __lowercase ( cls : Tuple ,_UpperCAmelCase : Union[str, os.PathLike] ,**_UpperCAmelCase : Any ):
cls._set_token_in_kwargs(_UpperCAmelCase )
_a , _a : List[Any] = cls.get_config_dict(_UpperCAmelCase ,**_UpperCAmelCase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_a : Union[str, Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Tuple = 'blip_2_qformer'
def __init__( self : int ,_UpperCAmelCase : List[str]=30522 ,_UpperCAmelCase : Dict=768 ,_UpperCAmelCase : Tuple=12 ,_UpperCAmelCase : Any=12 ,_UpperCAmelCase : int=3072 ,_UpperCAmelCase : str="gelu" ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : str=0.1 ,_UpperCAmelCase : int=512 ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : List[str]=1E-12 ,_UpperCAmelCase : int=0 ,_UpperCAmelCase : Optional[int]="absolute" ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Any=1408 ,**_UpperCAmelCase : Optional[int] ,):
super().__init__(pad_token_id=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Any = vocab_size
_a : Optional[int] = hidden_size
_a : Dict = num_hidden_layers
_a : Dict = num_attention_heads
_a : str = hidden_act
_a : Any = intermediate_size
_a : Dict = hidden_dropout_prob
_a : str = attention_probs_dropout_prob
_a : Union[str, Any] = max_position_embeddings
_a : Tuple = initializer_range
_a : Optional[int] = layer_norm_eps
_a : int = position_embedding_type
_a : Optional[Any] = cross_attention_frequency
_a : List[str] = encoder_hidden_size
@classmethod
def __lowercase ( cls : Dict ,_UpperCAmelCase : Union[str, os.PathLike] ,**_UpperCAmelCase : Union[str, Any] ):
cls._set_token_in_kwargs(_UpperCAmelCase )
_a , _a : str = cls.get_config_dict(_UpperCAmelCase ,**_UpperCAmelCase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_a : Any = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Tuple = 'blip-2'
lowerCAmelCase : Any = True
def __init__( self : Any ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Tuple=None ,_UpperCAmelCase : Dict=None ,_UpperCAmelCase : int=32 ,**_UpperCAmelCase : str ):
super().__init__(**_UpperCAmelCase )
if vision_config is None:
_a : str = {}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_a : List[str] = {}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_a : List[Any] = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_a : Optional[Any] = BlipaVisionConfig(**_UpperCAmelCase )
_a : Tuple = BlipaQFormerConfig(**_UpperCAmelCase )
_a : Optional[int] = text_config['model_type'] if 'model_type' in text_config else 'opt'
_a : Any = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase )
_a : Dict = self.text_config.tie_word_embeddings
_a : Union[str, Any] = self.text_config.is_encoder_decoder
_a : int = num_query_tokens
_a : str = self.vision_config.hidden_size
_a : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_a : Optional[int] = 1.0
_a : str = 0.02
@classmethod
def __lowercase ( cls : str ,_UpperCAmelCase : BlipaVisionConfig ,_UpperCAmelCase : BlipaQFormerConfig ,_UpperCAmelCase : PretrainedConfig ,**_UpperCAmelCase : List[Any] ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**_UpperCAmelCase ,)
def __lowercase ( self : int ):
_a : Dict = copy.deepcopy(self.__dict__ )
_a : Any = self.vision_config.to_dict()
_a : Optional[int] = self.qformer_config.to_dict()
_a : Dict = self.text_config.to_dict()
_a : Optional[Any] = self.__class__.model_type
return output
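# Illustrative usage (not part of the original module): a minimal sketch of how the composite
# config above is assembled from its three sub-configs. It assumes the public `transformers`
# names Blip2Config / Blip2VisionConfig / Blip2QFormerConfig / OPTConfig, which the obfuscated
# class names in this dump correspond to.
from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig

blip2_config = Blip2Config.from_vision_qformer_text_configs(
    Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig(), num_query_tokens=32
)
# Serializing nests the three sub-configs under their own keys.
serialized = blip2_config.to_dict()
assert {"vision_config", "qformer_config", "text_config"} <= serialized.keys()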
| 89 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to set each variation explicitly:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each experiment multiple times, e.g., 3 times via --repeat-times 3, and the averaged results will be reported.
#
# By default it'll use the lowest result as the baseline (100%) and then compare the rest to
# it, as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
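# Illustrative sketch (not part of the tool itself): how the --variations dimensions above
# expand into concrete runs. Each dimension is split on '|' and the dimensions are combined
# with a cartesian product, exactly as described in the comments above; the underscore-prefixed
# names below are local to this example.
import itertools as _itertools

_dims = [dim.split("|") for dim in ["--tf32 0|--tf32 1", "|--fp16|--bf16"]]
_variations = [" ".join(v).strip() for v in _itertools.product(*_dims)]
assert len(_variations) == 6 and _variations[0] == "--tf32 0" and _variations[1] == "--tf32 0 --fp16"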
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase__ = float('nan')
class lowerCAmelCase__ :
    '''Duplicate everything written to stdout into a log file, stripping tqdm control codes from the file copy.'''
def __init__( self , __lowerCamelCase) -> Optional[Any]:
_A : List[Any] = sys.stdout
_A : str = open(__lowerCamelCase , "a")
def __getattr__( self , __lowerCamelCase) -> List[str]:
return getattr(self.stdout , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
self.stdout.write(__lowerCamelCase)
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M))
def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ):
_A : Tuple = []
# deal with critical env vars
_A : Dict = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
_A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ )
if val is not None:
cmd.append(f"{key}={val}" )
# python executable (not always needed if the script is executable)
_A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(UpperCamelCase__ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_A : Tuple = []
_A : Dict = ""
while len(UpperCamelCase__ ) > 0:
current_line += f"{cmd.pop(0 )} "
if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(UpperCamelCase__ )
_A : Union[str, Any] = ""
return "\\\n".join(UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ):
# unwrap multi-line input
_A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
_A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += f" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
_A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
_A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
_A : Tuple = variation.replace(" " , "-" )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f:
f.write(result.stdout )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f:
_A : List[str] = json.load(UpperCamelCase__ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ):
_A : Union[str, Any] = []
_A : Optional[int] = []
_A : Any = f"{id}: {variation:<{longest_variation_len}}"
_A : Dict = f"{preamble}: "
_A : Union[str, Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ):
_A : Optional[Any] = process_run_single(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : Optional[Any] = single_run_metrics[target_metric_key]
if not math.isnan(UpperCamelCase__ ):
metrics.append(UpperCamelCase__ )
results.append(UpperCamelCase__ )
outcome += "✓"
else:
outcome += "✘"
_A : str = f"\33[2K\r{outcome}"
if len(UpperCamelCase__ ) > 0:
_A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_A : Any = round(mean_metrics[target_metric_key] , 2 )
_A : Tuple = f"{outcome} {mean_target}"
if len(UpperCamelCase__ ) > 1:
results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}"
print(UpperCamelCase__ )
_A : Optional[int] = variation
return mean_metrics
else:
print(UpperCamelCase__ )
return {variation_key: variation, target_metric_key: nan}
def _UpperCAmelCase ():
_A : int = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
_A : Any = pd.DataFrame(UpperCamelCase__ )
_A : List[str] = "variation"
_A : List[Any] = "diff_%"
_A : int = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(UpperCamelCase__ ):
# as a fallback, use the minimal value as the sentinel
_A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(UpperCamelCase__ ):
_A : Optional[Any] = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
_A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols
# capitalize
_A : Tuple = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
    _A : List[str] = df.rename(lambda c : c.replace("_" , "<br>" ) , axis="columns" )
    _A : Union[str, Any] = df.rename(lambda c : c.replace("_" , "\n" ) , axis="columns" )
_A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
print("\n\n".join(UpperCamelCase__ ) )
def _UpperCAmelCase ():
_A : int = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , )
parser.add_argument(
"--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
_A : int = parser.parse_args()
_A : Union[str, Any] = args.output_dir
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
_A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ )
# split each dimension into its --foo variations
_A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) )
_A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations )
# split wanted keys
_A : str = args.report_metric_keys.split()
# capture prints into a log file for convenience
_A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
print(f"and this script's output is also piped into {report_fn}" )
_A : Tuple = Tee(UpperCamelCase__ )
print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" )
print(f"Base command: {' '.join(UpperCamelCase__ )}" )
_A : str = "variation"
_A : Union[str, Any] = []
for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ):
_A : Dict = base_cmd + variation.split()
results.append(
process_run(
id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) )
process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 11 | 0 |
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
__lowerCamelCase = str(bin(UpperCamelCase__ ) )
binary_number += "0" * shift_amount
return binary_number
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('both inputs must be positive integers' )
__lowerCamelCase = str(bin(UpperCamelCase__ ) )[2:]
if shift_amount >= len(UpperCamelCase__ ):
return "0b0"
__lowerCamelCase = binary_number[: len(UpperCamelCase__ ) - shift_amount]
return "0b" + shifted_binary_number
def lowerCamelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : int ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
__lowerCamelCase = '0' + str(bin(UpperCamelCase__ ) ).strip('-' )[2:]
else: # Get binary (2's complement) representation of negative number
__lowerCamelCase = len(bin(UpperCamelCase__ )[3:] ) # Find 2's complement of number
__lowerCamelCase = bin(abs(UpperCamelCase__ ) - (1 << binary_number_length) )[3:]
__lowerCamelCase = (
'1' + '0' * (binary_number_length - len(UpperCamelCase__ )) + binary_number
)
if shift_amount >= len(UpperCamelCase__ ):
return "0b" + binary_number[0] * len(UpperCamelCase__ )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(UpperCamelCase__ ) - shift_amount]
)
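# Quick standalone sanity check mirroring the three helpers above (they share one obfuscated
# name in this dump, so Python's own shift operators are used here instead of calling them):
assert bin(17 << 2) == "0b1000100"             # logical left shift: zeros appended on the right
assert bin(17 >> 2) == "0b100"                 # logical right shift: low bits dropped
assert -17 >> 2 == -5                          # Python's >> on negatives is already arithmetic
assert format(-5 & 0b111111, "b") == "111011"  # 6-bit two's-complement view, as returned above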
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
    '''Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.'''
__SCREAMING_SNAKE_CASE = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__SCREAMING_SNAKE_CASE = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class lowerCAmelCase__ :
    '''Arguments pertaining to what data we are going to input our model for training and eval.'''
__SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Overwrite the cached training and evaluation sets"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "The number of processes to use for the preprocessing."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowerCamelCase ( self) -> int:
if self.train_file is not None:
_A : Optional[int] = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_A : Dict = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCAmelCase__ :
    '''Data collator that flattens multiple-choice features, pads them dynamically, and restores the (batch, num_choices, seq_len) shape.'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def __call__( self , __lowerCamelCase) -> str:
_A : List[Any] = "label" if "label" in features[0].keys() else "labels"
_A : Any = [feature.pop(__lowerCamelCase) for feature in features]
_A : Optional[int] = len(__lowerCamelCase)
_A : int = len(features[0]["input_ids"])
_A : Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase)] for feature in features
]
_A : str = list(chain(*__lowerCamelCase))
_A : Tuple = self.tokenizer.pad(
__lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
_A : Optional[int] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1) for k, v in batch.items()}
# Add back labels
        _A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.int64)
return batch
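# Shape sketch for the collator above (standalone, hypothetical sizes): every example carries
# `num_choices` tokenized candidates, which are flattened so the tokenizer can pad them as one
# batch and then reshaped back to (batch_size, num_choices, seq_len) for the model.
import torch as _torch

_batch_size, _num_choices, _seq_len = 2, 4, 8
_flat = _torch.zeros(_batch_size * _num_choices, _seq_len, dtype=_torch.long)  # after tokenizer.pad
_unflat = _flat.view(_batch_size, _num_choices, -1)
assert _unflat.shape == (_batch_size, _num_choices, _seq_len)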
def _UpperCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A : int = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_A : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_A : List[str] = {}
if data_args.train_file is not None:
_A : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
_A : Tuple = data_args.validation_file
_A : Union[str, Any] = data_args.train_file.split("." )[-1]
_A : List[str] = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_A : Union[str, Any] = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_A : str = [f"ending{i}" for i in range(4 )]
_A : Union[str, Any] = "sent1"
_A : str = "sent2"
if data_args.max_seq_length is None:
_A : Any = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
_A : Optional[Any] = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
_A : int = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCamelCase__ : List[Any] ):
_A : List[Any] = [[context] * 4 for context in examples[context_name]]
_A : Any = examples[question_header_name]
_A : Union[str, Any] = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
]
# Flatten out
_A : Dict = list(chain(*UpperCamelCase__ ) )
_A : List[Any] = list(chain(*UpperCamelCase__ ) )
# Tokenize
_A : str = tokenizer(
UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_A : Optional[int] = raw_datasets["train"]
if data_args.max_train_samples is not None:
_A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
_A : Any = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_A : Optional[int] = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_A : Optional[int] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
_A : Dict = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_A : List[str] = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_A : str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(UpperCamelCase__ : Tuple ):
_A , _A : List[str] = eval_predictions
_A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_A : List[str] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
_A : Any = None
if training_args.resume_from_checkpoint is not None:
_A : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A : int = last_checkpoint
_A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_A : Optional[int] = train_result.metrics
_A : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
_A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("train" , UpperCamelCase__ )
trainer.save_metrics("train" , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_A : List[Any] = trainer.evaluate()
_A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
_A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("eval" , UpperCamelCase__ )
trainer.save_metrics("eval" , UpperCamelCase__ )
_A : Tuple = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 11 | 0 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _A (__a ) -> Optional[int]:
"""simple docstring"""
    SCREAMING_SNAKE_CASE_ : Tuple = checkpoints.load_t5x_checkpoint(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = flatten_dict(__a )
return flax_params
def _A (__a ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {}
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
SCREAMING_SNAKE_CASE_ : Any = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
SCREAMING_SNAKE_CASE_ : str = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
SCREAMING_SNAKE_CASE_ : str = new_key.replace(__a , __a )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
SCREAMING_SNAKE_CASE_ : Any = new_key.replace(__a , __a )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
SCREAMING_SNAKE_CASE_ : int = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __a )
SCREAMING_SNAKE_CASE_ : Tuple = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
SCREAMING_SNAKE_CASE_ : Any = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __a )
SCREAMING_SNAKE_CASE_ : str = flax_dict[key]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
SCREAMING_SNAKE_CASE_ : List[Any] = torch.from_numpy(converted_dict[key].T )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
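# Regex sketch (standalone): the layer renumbering performed above turns T5X-style keys such as
# 'encoder.layers_3.attention.query.kernel' into the 'layer.<n>' form used by the PyTorch model.
import re as _re

assert _re.sub(r"layers_(\d+)", r"layer.\1", "encoder.layers_3.attention.query.kernel") == (
    "encoder.layer.3.attention.query.kernel"
)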
def _A (__a , __a , __a=False , __a=False ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_flax_param(__a )
if not use_large:
SCREAMING_SNAKE_CASE_ : Optional[int] = PixaStructVisionConfig()
SCREAMING_SNAKE_CASE_ : int = PixaStructTextConfig()
else:
SCREAMING_SNAKE_CASE_ : Dict = PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
SCREAMING_SNAKE_CASE_ : Tuple = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__a )
SCREAMING_SNAKE_CASE_ : Dict = PixaStructForConditionalGeneration(__a )
SCREAMING_SNAKE_CASE_ : Optional[int] = rename_and_convert_flax_params(__a )
model.load_state_dict(__a )
SCREAMING_SNAKE_CASE_ : Tuple = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
SCREAMING_SNAKE_CASE_ : List[str] = PixaStructImageProcessor()
SCREAMING_SNAKE_CASE_ : str = PixaStructProcessor(image_processor=__a , tokenizer=__a )
if use_large:
SCREAMING_SNAKE_CASE_ : int = 40_96
SCREAMING_SNAKE_CASE_ : int = True
# mkdir if needed
os.makedirs(__a , exist_ok=__a )
model.save_pretrained(__a )
processor.save_pretrained(__a )
print('''Model saved in {}'''.format(__a ) )
if __name__ == "__main__":
UpperCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
UpperCAmelCase_ : str = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 91 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self) -> str:
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , )
assert hasattr(self , "env")
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
_A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
_A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , )
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)])
def _lowerCamelCase ( self , __lowerCamelCase) -> Any:
# create estimator
_A : Union[str, Any] = self.create_estimator(__lowerCamelCase)
# run training
estimator.fit()
# result dataframe
_A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
_A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_A : Optional[Any] = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
| 11 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str]=False ):
__lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCAmelCase = ""
else:
__lowerCAmelCase = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase = in_proj_bias[: config.hidden_size]
__lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase = in_proj_bias[-config.hidden_size :]
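# Slicing sketch for the q/k/v split above (standalone, tiny hypothetical hidden size): timm
# stores query, key and value as one (3*hidden, hidden) projection; the converter cuts it into
# three equal (hidden, hidden) blocks in q, k, v order.
import torch as _torch

_hidden = 4
_in_proj_weight = _torch.arange(3 * _hidden * _hidden, dtype=_torch.float32).reshape(3 * _hidden, _hidden)
_q = _in_proj_weight[:_hidden, :]
_k = _in_proj_weight[_hidden : _hidden * 2, :]
_v = _in_proj_weight[-_hidden:, :]
assert _q.shape == _k.shape == _v.shape == (_hidden, _hidden)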
def _a ( SCREAMING_SNAKE_CASE_ : str ):
__lowerCAmelCase = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
__lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = val
def _a ( ):
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int=True ):
__lowerCAmelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
__lowerCAmelCase = 8
# set labels if required
if not base_model:
__lowerCAmelCase = 10_00
__lowerCAmelCase = "huggingface/label-files"
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
__lowerCAmelCase = 3_84
__lowerCAmelCase = 15_36
__lowerCAmelCase = 12
__lowerCAmelCase = 6
# load original model from torch hub
__lowerCAmelCase = torch.hub.load("facebookresearch/dino:main" , SCREAMING_SNAKE_CASE_ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
__lowerCAmelCase = original_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
if base_model:
__lowerCAmelCase = ViTModel(SCREAMING_SNAKE_CASE_ , add_pooling_layer=SCREAMING_SNAKE_CASE_ ).eval()
else:
__lowerCAmelCase = ViTForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by ViTImageProcessor
__lowerCAmelCase = ViTImageProcessor()
__lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowerCAmelCase = encoding["pixel_values"]
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ )
if base_model:
__lowerCAmelCase = original_model(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
__lowerCAmelCase = original_model(SCREAMING_SNAKE_CASE_ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
UpperCamelCase__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 92 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
__SCREAMING_SNAKE_CASE = "OwlViTImageProcessor"
__SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]:
_A : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
_A : List[Any] = kwargs.pop("feature_extractor")
_A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(__lowerCamelCase , __lowerCamelCase)
def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none.")
if text is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)):
_A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)]
elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase):
_A : Optional[Any] = []
# Maximum number of queries across batch
_A : str = max([len(__lowerCamelCase) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCamelCase) != max_num_queries:
_A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase))
_A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
encodings.append(__lowerCamelCase)
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
if return_tensors == "np":
_A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
_A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0)
_A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0)
else:
raise ValueError("Target return tensor type could not be returned")
_A : Optional[Any] = BatchEncoding()
_A : Tuple = input_ids
_A : Dict = attention_mask
if query_images is not None:
_A : Optional[Any] = BatchEncoding()
_A : List[str] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values
_A : Union[str, Any] = query_pixel_values
if images is not None:
_A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
if text is not None and images is not None:
_A : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_A : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str:
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]:
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@property
def _lowerCamelCase ( self) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
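# Padding sketch for the text-query handling above (standalone): every image in a batch is padded
# to the same number of text queries so the per-image encodings can be concatenated afterwards.
_texts = [["a cat"], ["a dog", "a remote control"]]
_max_num_queries = max(len(t) for t in _texts)
_padded = [t + [" "] * (_max_num_queries - len(t)) for t in _texts]
assert _padded == [["a cat", " "], ["a dog", "a remote control"]]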
| 11 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
@property
def _snake_case ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = ort.SessionOptions()
lowercase_ : Any = False
return options
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
lowercase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
lowercase_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy''' )
# using the PNDM scheduler by default
lowercase_ : Any = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = '''A red cat sitting on a park bench'''
lowercase_ : Any = np.random.RandomState(0 )
lowercase_ : Dict = pipe(
prompt=__SCREAMING_SNAKE_CASE , image=__SCREAMING_SNAKE_CASE , mask_image=__SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__SCREAMING_SNAKE_CASE , output_type='''np''' , )
lowercase_ : str = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 93 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
_A : Optional[int] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase)
def _lowerCamelCase ( self) -> int:
_A : Optional[int] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Dict:
_A : int = "sgugger/tiny-distilbert-classification"
_A : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = "sshleifer/tiny-gpt2"
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def _lowerCamelCase ( self) -> int:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Any = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Any:
_A : Union[str, Any] = "sshleifer/tiny-gpt2"
_A : Any = AutoConfig.from_pretrained(__lowerCamelCase)
# set architectures equal to `None`
_A : Dict = None
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : List[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> str:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : Tuple = "sshleifer/tinier_bart"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> str:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> int:
_A : int = "sshleifer/tinier_bart"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Dict:
_A : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> int:
_A : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = PyTorchBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
| 11 | 0 |
import string
import numpy
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
"""simple docstring"""
return b if a == 0 else greatest_common_divisor(b % a , UpperCAmelCase_ )
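# Euclid's algorithm: e.g. gcd(4, 8) == 4 and gcd(3, 5) == 1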
class _snake_case :
SCREAMING_SNAKE_CASE__ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
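    # e.g. with the 36-character key string above, 40 % 36 == 4 and (-3) % 36 == 33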
    modulus = numpy.vectorize(lambda x : x % 36 )
    to_int = numpy.vectorize(lambda x : round(x) )
def __init__( self , _lowerCamelCase ):
a :List[Any] = self.modulus(_lowerCamelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
a :int = encrypt_key.shape[0]
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.key_string.index(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.key_string[round(_lowerCamelCase )]
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
a :Any = det % len(self.key_string )
a :Dict = len(self.key_string )
if greatest_common_divisor(_lowerCamelCase , len(self.key_string ) ) != 1:
a :int = (
F'''determinant modular {req_l} of encryption key({det}) '''
F'''is not co prime w.r.t {req_l}.\nTry another key.'''
)
raise ValueError(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Optional[Any] = [char for char in text.upper() if char in self.key_string]
a :List[str] = chars[-1]
while len(_lowerCamelCase ) % self.break_key != 0:
chars.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Dict = self.process_text(text.upper() )
a :List[str] = ''''''
for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
a :int = text[i : i + self.break_key]
a :Optional[int] = [self.replace_letters(_lowerCamelCase ) for char in batch]
a :Union[str, Any] = numpy.array([vec] ).T
a :str = self.modulus(self.encrypt_key.dot(_lowerCamelCase ) ).T.tolist()[
0
]
a :List[Any] = ''''''.join(
self.replace_digits(_lowerCamelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
a :int = det % len(self.key_string )
a :Tuple = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
a :Tuple = i
break
a :List[str] = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :List[Any] = self.make_decrypt_key()
a :str = self.process_text(text.upper() )
a :List[Any] = ''''''
for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
a :Optional[Any] = text[i : i + self.break_key]
a :List[Any] = [self.replace_letters(_lowerCamelCase ) for char in batch]
a :str = numpy.array([vec] ).T
a :Dict = self.modulus(decrypt_key.dot(_lowerCamelCase ) ).T.tolist()[0]
a :List[Any] = ''''''.join(
self.replace_digits(_lowerCamelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def __lowerCamelCase ( ):
"""simple docstring"""
a :Tuple = int(input('''Enter the order of the encryption key: ''' ) )
a :Dict = []
print('''Enter each row of the encryption key with space separated integers''' )
for _ in range(UpperCAmelCase_ ):
a :List[str] = [int(UpperCAmelCase_ ) for x in input().split()]
hill_matrix.append(UpperCAmelCase_ )
a :Any = HillCipher(numpy.array(UpperCAmelCase_ ) )
print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
a :Any = input('''\n1. Encrypt\n2. Decrypt\n''' )
if option == "1":
a :str = input('''What text would you like to encrypt?: ''' )
print('''Your encrypted text is:''' )
print(hc.encrypt(UpperCAmelCase_ ) )
elif option == "2":
a :Dict = input('''What text would you like to decrypt?: ''' )
print('''Your decrypted text is:''' )
print(hc.decrypt(UpperCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
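# Illustrative use of the cipher above -- a sketch relying on the names HillCipher, encrypt and
# decrypt that the call sites in main() use (the corresponding definitions above are mangled):
#   key = numpy.array([[2, 5], [1, 6]])   # det = 7, coprime with 36, so it is a valid key
#   hc = HillCipher(key)
#   ciphertext = hc.encrypt("HELP")       # ciphertext over A-Z0-9, padded to a multiple of the key order
#   hc.decrypt(ciphertext)                # recovers "HELP"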
| 94 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = NllbTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
_A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
_A : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , )
_A : int = vocab_file
_A : Optional[Any] = False if not self.vocab_file else True
_A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "eng_Latn"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : Tuple = [self.sep_token_id]
_A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : List[Any] = src_lang
_A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Tuple = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , __lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Tuple = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> str:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
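    # The two helpers below switch the special-token template: in legacy mode the language code is
    # appended after </s> as a suffix token, otherwise it is prepended as the only prefix token and
    # </s> alone closes the sequence.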
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : List[str] = []
_A : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_A : Tuple = [self.cur_lang_code]
_A : Optional[Any] = [self.eos_token_id]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : Tuple = []
_A : Any = [self.eos_token_id, self.cur_lang_code]
else:
_A : Union[str, Any] = [self.cur_lang_code]
_A : str = [self.eos_token_id]
_A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : Dict = self.convert_ids_to_tokens(self.suffix_tokens)
_A : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
| 11 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __lowerCAmelCase ( unittest.TestCase):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=3 , lowerCAmelCase__=1_8 , lowerCAmelCase__=3_0 , lowerCAmelCase__=4_0_0 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , ) -> List[Any]:
'''simple docstring'''
a__ : int =size if size is not None else {"height": 1_8, "width": 1_8}
a__ : Dict =parent
a__ : Union[str, Any] =batch_size
a__ : List[Any] =num_channels
a__ : str =image_size
a__ : Any =min_resolution
a__ : Dict =max_resolution
a__ : Optional[int] =do_resize
a__ : List[str] =size
a__ : Union[str, Any] =do_normalize
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCamelCase__ , unittest.TestCase):
_lowercase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Tuple =ImageGPTImageProcessingTester(self )
@property
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "clusters" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[int] =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
a__ : Optional[Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Dict =self.image_processing_class(**self.image_processor_dict )
a__ : Optional[Any] =json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase__ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCAmelCase__ )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Tuple =os.path.join(lowerCAmelCase__ , "image_processor.json" )
image_processor_first.to_json_file(lowerCAmelCase__ )
a__ : List[Any] =self.image_processing_class.from_json_file(lowerCAmelCase__ ).to_dict()
a__ : Tuple =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase__ )
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Optional[int] =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCAmelCase__ )
a__ : List[Any] =self.image_processing_class.from_pretrained(lowerCAmelCase__ ).to_dict()
a__ : List[Any] =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCAmelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCAmelCase__ )
@unittest.skip("ImageGPT requires clusters at initialization" )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _A ( ):
"""simple docstring"""
a__ : Optional[int] =load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
a__ : Union[str, Any] =Image.open(dataset[4]["file"] )
a__ : Any =Image.open(dataset[5]["file"] )
a__ : str =[imagea, imagea]
return images
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
@slow
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : Tuple =ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
a__ : List[Any] =prepare_images()
# test non-batched
a__ : List[str] =image_processing(images[0] , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
a__ : Any =[3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCAmelCase__ )
# test batched
a__ : Optional[Any] =image_processing(lowerCAmelCase__ , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
a__ : Tuple =[3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCAmelCase__ )
| 95 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
def _UpperCAmelCase (UpperCamelCase__ : type , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None , ):
_A : Union[str, Any] = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
_A : Dict = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
_A : Dict = format_type
def _UpperCAmelCase (UpperCamelCase__ : Exception , UpperCamelCase__ : Optional[str] , UpperCamelCase__ : Optional[List[str]] = None ):
_A : Union[str, Any] = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
_A : Union[str, Any] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
lowerCAmelCase__ = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
lowerCAmelCase__ = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
lowerCAmelCase__ = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def _UpperCAmelCase (UpperCamelCase__ : Optional[str] ):
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def _UpperCAmelCase (UpperCamelCase__ : Optional[str] , **UpperCamelCase__ : List[Any] ):
_A : List[str] = get_format_type_from_alias(UpperCamelCase__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**UpperCamelCase__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'" )
| 11 | 0 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowercase__ = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = True , ):
_lowerCamelCase : Union[str, Any] = [file for file in os.listdir(lowercase ) if os.path.isfile(os.path.join(lowercase , lowercase ) )]
if identifier is not None:
_lowerCamelCase : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase , lowercase ):
for n_ in n_identifier:
_lowerCamelCase : str = [file for file in files if n_ not in file]
else:
_lowerCamelCase : Dict = [file for file in files if n_identifier not in file]
_lowerCamelCase : str = ignore_files or []
ignore_files.append('__init__.py' )
_lowerCamelCase : Union[str, Any] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , lowercase )
if only_modules:
_lowerCamelCase : List[str] = file.split('.' )[0]
try:
_lowerCamelCase : Tuple = getattr(lowercase , lowercase )
_lowerCamelCase : List[Any] = doctest.DocTestSuite(lowercase )
_lowerCamelCase : Optional[int] = unittest.TextTestRunner().run(lowercase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F'''{module_identifier} is not a module.''' )
else:
_lowerCamelCase : Any = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def A_ ( self ):
_lowerCamelCase : int = Path('src/transformers' )
_lowerCamelCase : List[Any] = 'modeling'
_lowerCamelCase : Dict = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(lowercase , identifier=lowercase , ignore_files=lowercase )
def A_ ( self ):
_lowerCamelCase : int = Path('src/transformers' )
_lowerCamelCase : Tuple = 'tokenization'
self.analyze_directory(lowercase , identifier=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = Path('src/transformers' )
_lowerCamelCase : int = 'configuration'
self.analyze_directory(lowercase , identifier=lowercase )
def A_ ( self ):
_lowerCamelCase : int = Path('src/transformers' )
_lowerCamelCase : Any = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(lowercase , n_identifier=lowercase )
def A_ ( self ):
_lowerCamelCase : int = Path('docs/source' )
_lowerCamelCase : List[str] = ['favicon.ico']
        self.analyze_directory(lowercase , ignore_files=lowercase , only_modules=lowercase )
 | 96 |
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
# "extended trapezoidal rule"
# int(f) = dx/2 * (f1 + 2f2 + ... + fn)
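    # e.g. for f(x) = x**2 on [0, 1] with 10 steps this rule gives 0.335 (exact integral: 1/3)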
_A : int = (boundary[1] - boundary[0]) / steps
_A : Any = boundary[0]
_A : List[Any] = boundary[1]
_A : str = make_points(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : str = 0.0
y += (h / 2.0) * f(UpperCamelCase__ )
for i in x_i:
# print(i)
y += h * f(UpperCamelCase__ )
y += (h / 2.0) * f(UpperCamelCase__ )
return y
def _UpperCAmelCase (UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
_A : Optional[int] = a + h
while x < (b - h):
yield x
_A : Dict = x + h
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ): # enter your function here
_A : Any = (x - 0) * (x - 0)
return y
def _UpperCAmelCase ():
_A : Optional[Any] = 0.0 # Lower bound of integration
_A : Optional[int] = 1.0 # Upper bound of integration
_A : List[Any] = 10.0 # define number of steps or resolution
_A : Any = [a, b] # define boundary of integration
_A : Tuple = method_a(UpperCamelCase__ , UpperCamelCase__ )
print(f"y = {y}" )
if __name__ == "__main__":
main()
| 11 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( A__ ):
"""simple docstring"""
_a = (DDPMScheduler,)
def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ :Tuple = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**UpperCamelCase_ )
return config
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=UpperCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :int = self.scheduler_classes[0]
UpperCamelCase__ :Tuple = self.get_scheduler_config()
UpperCamelCase__ :Union[str, Any] = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = self.scheduler_classes[0]
UpperCamelCase__ :Any = self.get_scheduler_config()
UpperCamelCase__ :int = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :Dict = len(UpperCamelCase_ )
UpperCamelCase__ :Tuple = self.dummy_model()
UpperCamelCase__ :int = self.dummy_sample_deter
UpperCamelCase__ :Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
UpperCamelCase__ :Dict = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase__ :Any = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase__ :Optional[int] = pred_prev_sample
UpperCamelCase__ :Dict = torch.sum(torch.abs(UpperCamelCase_ ) )
UpperCamelCase__ :Optional[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Dict = self.scheduler_classes[0]
UpperCamelCase__ :Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' )
UpperCamelCase__ :List[Any] = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = len(UpperCamelCase_ )
UpperCamelCase__ :List[str] = self.dummy_model()
UpperCamelCase__ :Optional[int] = self.dummy_sample_deter
UpperCamelCase__ :Tuple = torch.manual_seed(0 )
for t in reversed(range(UpperCamelCase_ ) ):
# 1. predict noise residual
UpperCamelCase__ :Tuple = model(UpperCamelCase_ , UpperCamelCase_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase__ :Optional[int] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCamelCase__ :List[str] = pred_prev_sample
UpperCamelCase__ :List[str] = torch.sum(torch.abs(UpperCamelCase_ ) )
UpperCamelCase__ :Tuple = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = self.scheduler_classes[0]
UpperCamelCase__ :Optional[Any] = self.get_scheduler_config()
UpperCamelCase__ :List[Any] = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
UpperCamelCase__ :Tuple = scheduler.timesteps
for i, timestep in enumerate(UpperCamelCase_ ):
if i == len(UpperCamelCase_ ) - 1:
UpperCamelCase__ :List[str] = -1
else:
UpperCamelCase__ :List[str] = timesteps[i + 1]
UpperCamelCase__ :Optional[int] = scheduler.previous_timestep(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = prev_t.item()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = self.scheduler_classes[0]
UpperCamelCase__ :List[Any] = self.get_scheduler_config()
UpperCamelCase__ :Union[str, Any] = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :List[str] = [100, 87, 50, 51, 0]
with self.assertRaises(UpperCamelCase_ , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Union[str, Any] = self.scheduler_classes[0]
UpperCamelCase__ :Union[str, Any] = self.get_scheduler_config()
UpperCamelCase__ :Union[str, Any] = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :Tuple = [100, 87, 50, 1, 0]
UpperCamelCase__ :List[Any] = len(UpperCamelCase_ )
with self.assertRaises(UpperCamelCase_ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase_ , timesteps=UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :str = self.scheduler_classes[0]
UpperCamelCase__ :Any = self.get_scheduler_config()
UpperCamelCase__ :Dict = scheduler_class(**UpperCamelCase_ )
UpperCamelCase__ :Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCamelCase_ , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=UpperCamelCase_ )
 | 97 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ("foo.json",)])
def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]:
_A : str = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
_A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0)
self.assertEqual(loaded_config.max_length , 2_0)
self.assertEqual(loaded_config.max_time , __lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Optional[int] = AutoConfig.from_pretrained("gpt2")
_A : int = GenerationConfig.from_model_config(__lowerCamelCase)
_A : List[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Optional[Any] = GenerationConfig()
_A : List[Any] = {
"max_new_tokens": 1_0_2_4,
"foo": "bar",
}
_A : List[str] = copy.deepcopy(__lowerCamelCase)
_A : int = generation_config.update(**__lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"foo": "bar"})
def _lowerCamelCase ( self) -> Any:
_A : int = GenerationConfig()
_A : int = "bar"
with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase)
_A : Any = GenerationConfig.from_pretrained(__lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar")
_A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase)
assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config
def _lowerCamelCase ( self) -> List[str]:
_A : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , __lowerCamelCase)
self.assertEqual(default_config.num_beams , 1)
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , __lowerCamelCase)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase)
_A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls) -> Optional[int]:
_A : Dict = TOKEN
HfFolder.save_token(__lowerCamelCase)
@classmethod
def _lowerCamelCase ( cls) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org")
except HTTPError:
pass
def _lowerCamelCase ( self) -> Any:
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token)
_A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Union[str, Any] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token)
_A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
| 11 | 0 |
"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __get__( self : str ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[str]=None ):
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute' )
UpperCAmelCase__ = '__cached_' + self.fget.__name__
UpperCAmelCase__ = getattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
if cached is None:
UpperCAmelCase__ = self.fget(lowerCamelCase__ )
setattr(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
return cached
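# The descriptor above is intended to mirror functools.cached_property: the first access computes the
# value via fget and stores it on the instance under "__cached_<name>", so later accesses reuse it.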
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'''invalid truth value {val!r}''' )
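# Intended behaviour of the truth-value parser above (case-insensitive): "y", "YES", "on", "1" -> 1;
# "n", "no", "off", "0" -> 0; any other string raises ValueError.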
def a_ ( lowerCamelCase ):
if is_torch_fx_proxy(lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCamelCase , np.ndarray )
def a_ ( lowerCamelCase ):
return isinstance(lowerCamelCase , np.ndarray )
def a_ ( lowerCamelCase ):
return _is_numpy(lowerCamelCase )
def a_ ( lowerCamelCase ):
import torch
return isinstance(lowerCamelCase , torch.Tensor )
def a_ ( lowerCamelCase ):
return False if not is_torch_available() else _is_torch(lowerCamelCase )
def a_ ( lowerCamelCase ):
import torch
return isinstance(lowerCamelCase , torch.device )
def a_ ( lowerCamelCase ):
return False if not is_torch_available() else _is_torch_device(lowerCamelCase )
def a_ ( lowerCamelCase ):
import torch
if isinstance(lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = getattr(lowerCamelCase , lowerCamelCase )
else:
return False
return isinstance(lowerCamelCase , torch.dtype )
def a_ ( lowerCamelCase ):
return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase )
def a_ ( lowerCamelCase ):
import tensorflow as tf
return isinstance(lowerCamelCase , tf.Tensor )
def a_ ( lowerCamelCase ):
return False if not is_tf_available() else _is_tensorflow(lowerCamelCase )
def a_ ( lowerCamelCase ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCamelCase , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(lowerCamelCase )
return type(lowerCamelCase ) == tf.Tensor
def a_ ( lowerCamelCase ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase )
def a_ ( lowerCamelCase ):
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCamelCase , jnp.ndarray )
def a_ ( lowerCamelCase ):
return False if not is_flax_available() else _is_jax(lowerCamelCase )
def a_ ( lowerCamelCase ):
if isinstance(lowerCamelCase , (dict, UserDict) ):
return {k: to_py_obj(lowerCamelCase ) for k, v in obj.items()}
elif isinstance(lowerCamelCase , (list, tuple) ):
return [to_py_obj(lowerCamelCase ) for o in obj]
elif is_tf_tensor(lowerCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCamelCase ):
return np.asarray(lowerCamelCase ).tolist()
elif isinstance(lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a_ ( lowerCamelCase ):
if isinstance(lowerCamelCase , (dict, UserDict) ):
return {k: to_numpy(lowerCamelCase ) for k, v in obj.items()}
elif isinstance(lowerCamelCase , (list, tuple) ):
return np.array(lowerCamelCase )
elif is_tf_tensor(lowerCamelCase ):
return obj.numpy()
elif is_torch_tensor(lowerCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCamelCase ):
return np.asarray(lowerCamelCase )
else:
return obj
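# Both conversion helpers above dispatch on the input type: dicts recurse per key, lists/tuples are
# converted element-wise or wrapped as arrays, framework tensors (TF/PyTorch/JAX) are materialized as
# Python lists or NumPy arrays, and any other object is returned unchanged.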
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[Any] ):
UpperCAmelCase__ = fields(self )
# Safety and consistency checks
if not len(lowerCamelCase__ ):
raise ValueError(f'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' )
UpperCAmelCase__ = getattr(self ,class_fields[0].name )
UpperCAmelCase__ = all(getattr(self ,field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(lowerCamelCase__ ):
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
UpperCAmelCase__ = first_field.items()
UpperCAmelCase__ = True
else:
try:
UpperCAmelCase__ = iter(lowerCamelCase__ )
UpperCAmelCase__ = True
except TypeError:
UpperCAmelCase__ = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(lowerCamelCase__ ):
if (
not isinstance(lowerCamelCase__ ,(list, tuple) )
or not len(lowerCamelCase__ ) == 2
or not isinstance(element[0] ,lowerCamelCase__ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCAmelCase__ = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self ,element[0] ,element[1] )
if element[1] is not None:
UpperCAmelCase__ = element[1]
elif first_field is not None:
UpperCAmelCase__ = first_field
else:
for field in class_fields:
UpperCAmelCase__ = getattr(self ,field.name )
if v is not None:
UpperCAmelCase__ = v
def __delitem__( self : Union[str, Any] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : Dict ):
raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def __lowerCAmelCase ( self : Optional[int] ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : str ):
raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def __lowerCAmelCase ( self : str ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Union[str, Any] ):
raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def __lowerCAmelCase ( self : str ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : List[Any] ):
raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self : List[str] ,lowerCamelCase__ : List[Any] ):
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
UpperCAmelCase__ = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[int] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[int] ):
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(lowerCamelCase__ ,lowerCamelCase__ )
super().__setattr__(lowerCamelCase__ ,lowerCamelCase__ )
def __setitem__( self : Optional[Any] ,lowerCamelCase__ : str ,lowerCamelCase__ : Dict ):
# Will raise a KeyException if needed
super().__setitem__(lowerCamelCase__ ,lowerCamelCase__ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(lowerCamelCase__ ,lowerCamelCase__ )
def __lowerCAmelCase ( self : Optional[int] ):
return tuple(self[k] for k in self.keys() )
class snake_case ( __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
@classmethod
    def __lowerCAmelCase ( cls : List[str] ,value : List[Any] ):
        raise ValueError(
            f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "longest"
snake_case__ = "max_length"
snake_case__ = "do_not_pad"
class snake_case ( __UpperCAmelCase ):
"""simple docstring"""
snake_case__ = "pt"
snake_case__ = "tf"
snake_case__ = "np"
snake_case__ = "jax"
class snake_case :
"""simple docstring"""
def __init__( self : Tuple ,lowerCamelCase__ : List[ContextManager] ):
UpperCAmelCase__ = context_managers
UpperCAmelCase__ = ExitStack()
def __enter__( self : Union[str, Any] ):
for context_manager in self.context_managers:
self.stack.enter_context(lowerCamelCase__ )
def __exit__( self : Union[str, Any] ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : Dict ):
self.stack.__exit__(*lowerCamelCase__ ,**lowerCamelCase__ )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = infer_framework(lowerCamelCase )
if framework == "tf":
UpperCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = model_class.__name__
UpperCAmelCase__ = infer_framework(lowerCamelCase )
if framework == "tf":
UpperCAmelCase__ = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCAmelCase__ = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCAmelCase__ = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def a_ ( lowerCamelCase , lowerCamelCase = "" , lowerCamelCase = "." ):
def _flatten_dict(lowerCamelCase , lowerCamelCase="" , lowerCamelCase="." ):
for k, v in d.items():
UpperCAmelCase__ = str(lowerCamelCase ) + delimiter + str(lowerCamelCase ) if parent_key else k
if v and isinstance(lowerCamelCase , lowerCamelCase ):
yield from flatten_dict(lowerCamelCase , lowerCamelCase , delimiter=lowerCamelCase ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase ) )
@contextmanager
def a_ ( lowerCamelCase , lowerCamelCase = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def a_ ( lowerCamelCase , lowerCamelCase=None ):
if is_numpy_array(lowerCamelCase ):
return np.transpose(lowerCamelCase , axes=lowerCamelCase )
elif is_torch_tensor(lowerCamelCase ):
return array.T if axes is None else array.permute(*lowerCamelCase )
elif is_tf_tensor(lowerCamelCase ):
import tensorflow as tf
return tf.transpose(lowerCamelCase , perm=lowerCamelCase )
elif is_jax_tensor(lowerCamelCase ):
return jnp.transpose(lowerCamelCase , axes=lowerCamelCase )
else:
raise ValueError(f'''Type not supported for transpose: {type(lowerCamelCase )}.''' )
def a_ ( lowerCamelCase , lowerCamelCase ):
if is_numpy_array(lowerCamelCase ):
return np.reshape(lowerCamelCase , lowerCamelCase )
elif is_torch_tensor(lowerCamelCase ):
return array.reshape(*lowerCamelCase )
elif is_tf_tensor(lowerCamelCase ):
import tensorflow as tf
return tf.reshape(lowerCamelCase , lowerCamelCase )
elif is_jax_tensor(lowerCamelCase ):
return jnp.reshape(lowerCamelCase , lowerCamelCase )
else:
raise ValueError(f'''Type not supported for reshape: {type(lowerCamelCase )}.''' )
def a_ ( lowerCamelCase , lowerCamelCase=None ):
if is_numpy_array(lowerCamelCase ):
return np.squeeze(lowerCamelCase , axis=lowerCamelCase )
elif is_torch_tensor(lowerCamelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCamelCase )
elif is_tf_tensor(lowerCamelCase ):
import tensorflow as tf
return tf.squeeze(lowerCamelCase , axis=lowerCamelCase )
elif is_jax_tensor(lowerCamelCase ):
return jnp.squeeze(lowerCamelCase , axis=lowerCamelCase )
else:
raise ValueError(f'''Type not supported for squeeze: {type(lowerCamelCase )}.''' )
def a_ ( lowerCamelCase , lowerCamelCase ):
if is_numpy_array(lowerCamelCase ):
return np.expand_dims(lowerCamelCase , lowerCamelCase )
elif is_torch_tensor(lowerCamelCase ):
return array.unsqueeze(dim=lowerCamelCase )
elif is_tf_tensor(lowerCamelCase ):
import tensorflow as tf
return tf.expand_dims(lowerCamelCase , axis=lowerCamelCase )
elif is_jax_tensor(lowerCamelCase ):
return jnp.expand_dims(lowerCamelCase , axis=lowerCamelCase )
else:
raise ValueError(f'''Type not supported for expand_dims: {type(lowerCamelCase )}.''' )
def a_ ( lowerCamelCase ):
if is_numpy_array(lowerCamelCase ):
return np.size(lowerCamelCase )
elif is_torch_tensor(lowerCamelCase ):
return array.numel()
elif is_tf_tensor(lowerCamelCase ):
import tensorflow as tf
return tf.size(lowerCamelCase )
elif is_jax_tensor(lowerCamelCase ):
return array.size
else:
raise ValueError(f'''Type not supported for expand_dims: {type(lowerCamelCase )}.''' )
def a_ ( lowerCamelCase , lowerCamelCase ):
for key, value in auto_map.items():
if isinstance(lowerCamelCase , (tuple, list) ):
UpperCAmelCase__ = [f'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCAmelCase__ = f'''{repo_id}--{value}'''
return auto_map
def a_ ( lowerCamelCase ):
for base_class in inspect.getmro(lowerCamelCase ):
UpperCAmelCase__ = base_class.__module__
UpperCAmelCase__ = base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
        raise TypeError(f'''Could not infer framework from class {lowerCamelCase}.''' )
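# A minimal standalone sketch of the nested-dict flattening implemented above;
# the helper name `flatten_nested` and the sample dict are illustrative and not
# part of the original module.
def flatten_nested(d, parent_key="", delimiter="."):
    items = {}
    for k, v in d.items():
        key = f"{parent_key}{delimiter}{k}" if parent_key else k
        if isinstance(v, dict):
            items.update(flatten_nested(v, key, delimiter))
        else:
            items[key] = v
    return items


if __name__ == "__main__":
    print(flatten_nested({"a": {"b": 1, "c": {"d": 2}}}))  # {'a.b': 1, 'a.c.d': 2}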
| 98 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str:
_A : Optional[int] = bp_numa
_A : Dict = bp_numa
_A : Tuple = bp_numa
_A : List[str] = conva_get[:2]
_A : Tuple = conva_get[2]
_A : Optional[int] = size_pa
_A : Optional[Any] = rate_w
_A : Optional[Any] = rate_t
_A : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Any = -2 * np.random.rand(self.conva[1]) + 1
_A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
_A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# save model dict with pickle
_A : Dict = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb") as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase)
print(F"Model saved: {save_path}")
@classmethod
def _lowerCamelCase ( cls , __lowerCamelCase) -> Any:
# read saved model
with open(__lowerCamelCase , "rb") as f:
_A : Any = pickle.load(__lowerCamelCase) # noqa: S301
_A : Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
_A : str = model_dic.get("size_pooling1")
_A : List[str] = model_dic.get("num_bp1")
_A : Union[str, Any] = model_dic.get("num_bp2")
_A : List[Any] = model_dic.get("num_bp3")
_A : Dict = model_dic.get("rate_weight")
_A : List[Any] = model_dic.get("rate_thre")
# create model instance
_A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# modify model parameter
_A : List[Any] = model_dic.get("w_conv1")
_A : Union[str, Any] = model_dic.get("wkj")
_A : str = model_dic.get("vji")
_A : List[str] = model_dic.get("thre_conv1")
_A : Optional[Any] = model_dic.get("thre_bp2")
_A : Dict = model_dic.get("thre_bp3")
return conv_ins
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
return 1 / (1 + np.exp(-1 * x))
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
return round(__lowerCamelCase , 3)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]:
# convolution process
_A : Tuple = convs[0]
_A : Union[str, Any] = convs[1]
_A : List[Any] = np.shape(__lowerCamelCase)[0]
# get the data slice of original image data, data_focus
_A : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
_A : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase)
        # calculate the feature map of every single kernel, and save it as a list of matrices
_A : Optional[Any] = []
_A : Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__lowerCamelCase):
_A : Optional[int] = []
for i_focus in range(len(__lowerCamelCase)):
_A : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase))
_A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape(
__lowerCamelCase , __lowerCamelCase)
data_featuremap.append(__lowerCamelCase)
        # expanding the data slice to one dimension
_A : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase))
_A : Dict = np.asarray(__lowerCamelCase)
return focus_list, data_featuremap
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict:
# pooling process
_A : Optional[Any] = len(featuremaps[0])
_A : str = int(size_map / size_pooling)
_A : Optional[int] = []
for i_map in range(len(__lowerCamelCase)):
_A : int = featuremaps[i_map]
_A : Optional[int] = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase))
_A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase)
featuremap_pooled.append(__lowerCamelCase)
return featuremap_pooled
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
        # expanding three-dimensional data to a one-dimensional list
_A : Tuple = []
for i in range(len(__lowerCamelCase)):
_A : Union[str, Any] = np.shape(data[i])
_A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1])
_A : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase)
_A : Optional[Any] = np.asarray(__lowerCamelCase)
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
# expanding matrix to one dimension list
_A : List[Any] = np.asarray(__lowerCamelCase)
_A : Union[str, Any] = np.shape(__lowerCamelCase)
_A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Dict = []
_A : Any = 0
for i_map in range(__lowerCamelCase):
_A : Union[str, Any] = np.ones((size_map, size_map))
for i in range(0 , __lowerCamelCase , __lowerCamelCase):
for j in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : List[Any] = pd_pool[
i_pool
]
_A : Tuple = i_pool + 1
_A : Optional[Any] = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(__lowerCamelCase)
return pd_all
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]:
        # model training
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase)))
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase)))
_A : Tuple = 0
_A : Dict = []
_A : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_A : Union[str, Any] = 0
print(F"-------------Learning Time {rp}--------------")
for p in range(len(__lowerCamelCase)):
# print('------------Learning Image: %d--------------'%p)
_A : str = np.asmatrix(datas_train[p])
_A : Union[str, Any] = np.asarray(datas_teach[p])
_A , _A : Any = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = np.shape(__lowerCamelCase)
_A : List[str] = self._expand(__lowerCamelCase)
_A : Tuple = data_bp_input
_A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa
_A : List[Any] = self.sig(__lowerCamelCase)
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa
_A : List[str] = self.sig(__lowerCamelCase)
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
_A : int = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Optional[Any] = np.multiply(
np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji)
_A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
_A : Dict = pd_conva_pooled.T.getA().tolist()
_A : Optional[Any] = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_A : int = self._expand_mat(pd_conva_all[k_conv])
_A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_A : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
_A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
_A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_A : Optional[int] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_A : Any = rp + 1
_A : Dict = error_count / patterns
all_mse.append(__lowerCamelCase)
def draw_error():
_A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__lowerCamelCase , "+-")
plt.plot(__lowerCamelCase , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(__lowerCamelCase , alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
# model predict
_A : Union[str, Any] = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase)))
for p in range(len(__lowerCamelCase)):
_A : int = np.asmatrix(datas_test[p])
_A , _A : List[Any] = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : str = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = self._expand(__lowerCamelCase)
_A : List[Any] = data_bp_input
_A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
_A : int = self.sig(__lowerCamelCase)
_A : int = bp_outa * self.wkj.T - self.thre_bpa
_A : Optional[int] = self.sig(__lowerCamelCase)
produce_out.extend(bp_outa.getA().tolist())
_A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out]
return np.asarray(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
        # return the image data after the convolution process so we can inspect it
_A : Optional[int] = np.asmatrix(__lowerCamelCase)
_A , _A : Tuple = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
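    # A minimal numpy sketch of the 2x2 average pooling performed in the pooling
    # method above; the 4x4 feature map is illustrative and the sketch reuses the
    # numpy import at the top of this file.
    feature_map = np.arange(16, dtype=float).reshape(4, 4)
    size_pooling = 2
    pooled = np.zeros((2, 2))
    for i in range(0, 4, size_pooling):
        for j in range(0, 4, size_pooling):
            pooled[i // size_pooling, j // size_pooling] = np.average(
                feature_map[i : i + size_pooling, j : j + size_pooling]
            )
    print(pooled)  # [[ 2.5  4.5] [10.5 12.5]]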
| 11 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__ : str = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : str = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : Optional[Any] = AutoModel.from_pretrained(lowercase , from_tf=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__ : List[str] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : Any = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : Optional[int] = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def __lowercase ( self) -> str:
'''simple docstring'''
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Optional[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase)
a__ , a__ : Tuple = TFAutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase)
a__ , a__ : Any = AutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Optional[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : str = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def __lowercase ( self) -> List[str]:
'''simple docstring'''
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : str = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : List[str] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase)
a__ , a__ : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : Tuple = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase)
a__ , a__ : Optional[int] = AutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def __lowercase ( self) -> List[str]:
'''simple docstring'''
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Optional[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase)
a__ , a__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase)
a__ , a__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def __lowercase ( self) -> List[str]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__ : Optional[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : int = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : Any = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
@slow
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
for model_name in ["bert-base-uncased"]:
a__ : List[Any] = AutoConfig.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : Optional[int] = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
a__ : str = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase)
self.assertIsNotNone(lowercase)
self.assertIsInstance(lowercase , lowercase)
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase)
self.assertIsInstance(lowercase , lowercase)
self.assertEqual(model.num_parameters() , 1_4410)
self.assertEqual(model.num_parameters(only_trainable=lowercase) , 1_4410)
a__ : Any = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase)
self.assertIsInstance(lowercase , lowercase)
self.assertEqual(model.num_parameters() , 1_4410)
self.assertEqual(model.num_parameters(only_trainable=lowercase) , 1_4410)
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase)
self.assertIsInstance(lowercase , lowercase)
self.assertEqual(model.num_parameters() , 1_4410)
self.assertEqual(model.num_parameters(only_trainable=lowercase) , 1_4410)
a__ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase)
self.assertIsInstance(lowercase , lowercase)
self.assertEqual(model.num_parameters() , 1_4410)
self.assertEqual(model.num_parameters(only_trainable=lowercase) , 1_4410)
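# A minimal sketch of the PyTorch<->TensorFlow cross-loading pattern these tests
# exercise; it downloads the public "bert-base-uncased" checkpoint and needs both
# torch and tensorflow installed, so it is illustrative rather than part of the
# automated test run.
if __name__ == "__main__":
    tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
    pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)
    print(type(tf_model).__name__, type(pt_model).__name__)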
| 99 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
_A : str = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_A : Tuple = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def _UpperCAmelCase (UpperCamelCase__ : str ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def _UpperCAmelCase ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P("mp" , UpperCamelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _UpperCAmelCase (UpperCamelCase__ : List[str] ):
_A : int = _get_partition_rules()
_A : Optional[int] = _replacement_rules(UpperCamelCase__ )
_A : Optional[int] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_A : List[str] = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
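# A minimal standalone sketch of the suffix matching the rules above rely on: a
# rule matches when its regexes line up with a trailing window of the parameter
# key. The helper name and the sample key are illustrative; the sketch reuses the
# `re` import at the top of this file.
def _suffix_match(rule, key):
    patterns = [re.compile(p + "$") for p in rule]
    for i in range(len(key) - len(rule) + 1):
        window = key[i : i + len(rule)]
        if all(p.match(k) for p, k in zip(patterns, window)):
            return True
    return False


if __name__ == "__main__":
    sample_key = ("transformer", "h", "3", "mlp", "c_fc", "kernel")
    print(_suffix_match(("mlp", "c_fc", "kernel"), sample_key))  # True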
| 11 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : str = ['''image_processor''', '''tokenizer''']
__lowercase : Optional[Any] = '''AutoImageProcessor'''
__lowercase : int = '''AutoTokenizer'''
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""")
__SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.image_processor
__SCREAMING_SNAKE_CASE = False
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = kwargs.pop("""images""" , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = kwargs.pop("""text""" , lowerCAmelCase__)
if len(lowerCAmelCase__) > 0:
__SCREAMING_SNAKE_CASE = args[0]
__SCREAMING_SNAKE_CASE = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""")
if images is not None:
__SCREAMING_SNAKE_CASE = self.image_processor(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__)
if text is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__)
if text is None:
return inputs
elif images is None:
return encodings
else:
__SCREAMING_SNAKE_CASE = encodings["""input_ids"""]
return inputs
def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__):
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__):
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__)
@contextmanager
def snake_case_ ( self):
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""")
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer
yield
__SCREAMING_SNAKE_CASE = self.image_processor
__SCREAMING_SNAKE_CASE = False
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , lowerCAmelCase__=None):
if added_vocab is None:
__SCREAMING_SNAKE_CASE = self.tokenizer.get_added_vocab()
__SCREAMING_SNAKE_CASE = {}
while tokens:
__SCREAMING_SNAKE_CASE = re.search(R"""<s_(.*?)>""" , lowerCAmelCase__ , re.IGNORECASE)
if start_token is None:
break
__SCREAMING_SNAKE_CASE = start_token.group(1)
__SCREAMING_SNAKE_CASE = re.search(Rf"</s_{key}>" , lowerCAmelCase__ , re.IGNORECASE)
__SCREAMING_SNAKE_CASE = start_token.group()
if end_token is None:
__SCREAMING_SNAKE_CASE = tokens.replace(lowerCAmelCase__ , """""")
else:
__SCREAMING_SNAKE_CASE = end_token.group()
__SCREAMING_SNAKE_CASE = re.escape(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = re.escape(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , lowerCAmelCase__ , re.IGNORECASE)
if content is not None:
__SCREAMING_SNAKE_CASE = content.group(1).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__SCREAMING_SNAKE_CASE = self.tokenajson(lowerCAmelCase__ , is_inner_value=lowerCAmelCase__ , added_vocab=lowerCAmelCase__)
if value:
if len(lowerCAmelCase__) == 1:
__SCREAMING_SNAKE_CASE = value[0]
__SCREAMING_SNAKE_CASE = value
else: # leaf nodes
__SCREAMING_SNAKE_CASE = []
for leaf in content.split(R"""<sep/>"""):
__SCREAMING_SNAKE_CASE = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__SCREAMING_SNAKE_CASE = leaf[1:-2] # for categorical special tokens
output[key].append(lowerCAmelCase__)
if len(output[key]) == 1:
__SCREAMING_SNAKE_CASE = output[key][0]
__SCREAMING_SNAKE_CASE = tokens[tokens.find(lowerCAmelCase__) + len(lowerCAmelCase__) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowerCAmelCase__ , added_vocab=lowerCAmelCase__)
if len(lowerCAmelCase__):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def snake_case_ ( self):
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCAmelCase__ , )
return self.image_processor_class
@property
def snake_case_ ( self):
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCAmelCase__ , )
return self.image_processor
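# A minimal standalone sketch of the `<s_key>...</s_key>` extraction step used by
# the token-to-JSON conversion above; the sample string is illustrative and the
# sketch reuses the `re` import at the top of this file.
if __name__ == "__main__":
    sample = "<s_menu><s_name>Latte</s_name></s_menu>"
    start_token = re.search(r"<s_(.*?)>", sample, re.IGNORECASE)
    key = start_token.group(1)
    end_token = re.search(rf"</s_{key}>", sample, re.IGNORECASE)
    print(key, sample[start_token.end() : end_token.start()])  # menu <s_name>Latte</s_name>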
| 100 |
def _UpperCAmelCase (input_str : str , use_pascal : bool = False ):
    if not isinstance(input_str , str ):
        msg = f"Expected string as input, found {type(input_str )}"
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal )}"
        raise ValueError(msg )
    words = input_str.split("_" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
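    # Example usage of the converter above; the sample string is illustrative and
    # the expected outputs are shown in the comments.
    print(_UpperCAmelCase("some_random_string"))  # someRandomString
    print(_UpperCAmelCase("some_random_string", use_pascal=True))  # SomeRandomString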
| 11 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Union[str, Any] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Union[str, Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 101 |
from __future__ import annotations
def _UpperCAmelCase (value : list[int] , weight : list[int] , capacity : int ):
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
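    # Example: values [10, 4], weights [4, 2], capacity 5 -> the higher-ratio item
    # is taken whole and half of the other (illustrative numbers).
    print(_UpperCAmelCase([10, 4], [4, 2], 5))  # (12.0, [1, 0.5])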
| 11 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_28, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_42, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
__snake_case : Dict = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_28,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_42,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(a_ ) , a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(a_ ) , x.transpose() ) )
__snake_case : str = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = np.random.randn(3 , 4 )
__snake_case : str = torch.tensor(a_ )
self.assertTrue(np.allclose(transpose(a_ ) , transpose(a_ ).numpy() ) )
__snake_case : List[Any] = np.random.randn(3 , 4 , 5 )
__snake_case : List[Any] = torch.tensor(a_ )
self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , transpose(a_ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = np.random.randn(3 , 4 )
__snake_case : Optional[Any] = tf.constant(a_ )
self.assertTrue(np.allclose(transpose(a_ ) , transpose(a_ ).numpy() ) )
__snake_case : Optional[int] = np.random.randn(3 , 4 , 5 )
__snake_case : Optional[int] = tf.constant(a_ )
self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , transpose(a_ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = np.random.randn(3 , 4 )
__snake_case : Optional[Any] = jnp.array(a_ )
self.assertTrue(np.allclose(transpose(a_ ) , np.asarray(transpose(a_ ) ) ) )
__snake_case : Any = np.random.randn(3 , 4 , 5 )
__snake_case : Tuple = jnp.array(a_ )
self.assertTrue(np.allclose(transpose(a_ , axes=(1, 2, 0) ) , np.asarray(transpose(a_ , axes=(1, 2, 0) ) ) ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , np.reshape(a_ , (4, 3) ) ) )
__snake_case : Dict = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , np.reshape(a_ , (12, 5) ) ) )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = np.random.randn(3 , 4 )
__snake_case : int = torch.tensor(a_ )
self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , reshape(a_ , (4, 3) ).numpy() ) )
__snake_case : Optional[int] = np.random.randn(3 , 4 , 5 )
__snake_case : Optional[Any] = torch.tensor(a_ )
self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , reshape(a_ , (12, 5) ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = np.random.randn(3 , 4 )
__snake_case : Dict = tf.constant(a_ )
self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , reshape(a_ , (4, 3) ).numpy() ) )
__snake_case : Optional[Any] = np.random.randn(3 , 4 , 5 )
__snake_case : List[Any] = tf.constant(a_ )
self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , reshape(a_ , (12, 5) ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = np.random.randn(3 , 4 )
__snake_case : Any = jnp.array(a_ )
self.assertTrue(np.allclose(reshape(a_ , (4, 3) ) , np.asarray(reshape(a_ , (4, 3) ) ) ) )
__snake_case : int = np.random.randn(3 , 4 , 5 )
__snake_case : Dict = jnp.array(a_ )
self.assertTrue(np.allclose(reshape(a_ , (12, 5) ) , np.asarray(reshape(a_ , (12, 5) ) ) ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(a_ ) , np.squeeze(a_ ) ) )
__snake_case : List[str] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , np.squeeze(a_ , axis=2 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = np.random.randn(1 , 3 , 4 )
__snake_case : Any = torch.tensor(a_ )
self.assertTrue(np.allclose(squeeze(a_ ) , squeeze(a_ ).numpy() ) )
__snake_case : Optional[Any] = np.random.randn(1 , 4 , 1 , 5 )
__snake_case : Optional[int] = torch.tensor(a_ )
self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , squeeze(a_ , axis=2 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = np.random.randn(1 , 3 , 4 )
__snake_case : Optional[int] = tf.constant(a_ )
self.assertTrue(np.allclose(squeeze(a_ ) , squeeze(a_ ).numpy() ) )
__snake_case : List[Any] = np.random.randn(1 , 4 , 1 , 5 )
__snake_case : Dict = tf.constant(a_ )
self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , squeeze(a_ , axis=2 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = np.random.randn(1 , 3 , 4 )
__snake_case : Union[str, Any] = jnp.array(a_ )
self.assertTrue(np.allclose(squeeze(a_ ) , np.asarray(squeeze(a_ ) ) ) )
__snake_case : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
__snake_case : Optional[Any] = jnp.array(a_ )
self.assertTrue(np.allclose(squeeze(a_ , axis=2 ) , np.asarray(squeeze(a_ , axis=2 ) ) ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , np.expand_dims(a_ , axis=1 ) ) )
@require_torch
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = np.random.randn(3 , 4 )
__snake_case : Tuple = torch.tensor(a_ )
self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , expand_dims(a_ , axis=1 ).numpy() ) )
@require_tf
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = np.random.randn(3 , 4 )
__snake_case : Optional[int] = tf.constant(a_ )
self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , expand_dims(a_ , axis=1 ).numpy() ) )
@require_flax
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = np.random.randn(3 , 4 )
__snake_case : int = jnp.array(a_ )
self.assertTrue(np.allclose(expand_dims(a_ , axis=1 ) , np.asarray(expand_dims(a_ , axis=1 ) ) ) )
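# A small numpy illustration of the axis permutation the tests above verify:
# transposing a (3, 4, 5) array with axes=(1, 2, 0) yields shape (4, 5, 3).
if __name__ == "__main__":
    sample = np.random.randn(3, 4, 5)
    print(transpose(sample, axes=(1, 2, 0)).shape)  # (4, 5, 3)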
| 102 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , *__lowerCamelCase , **__lowerCamelCase) -> None:
warnings.warn(
"The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use BeitImageProcessor instead." , __lowerCamelCase , )
super().__init__(*__lowerCamelCase , **__lowerCamelCase)
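# A hedged sketch of the replacement the deprecation warning above points to; the
# checkpoint name is illustrative and the helper is not called anywhere in this
# module.
def _beit_image_processor_example():
    return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")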
| 11 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
A__ : Optional[Any] = logging.get_logger(__name__)
class __snake_case ( UpperCamelCase_ ):
def __init__( self : Any , *A_ : List[Any] , **A_ : int):
warnings.warn(
'''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use LayoutLMv2ImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_)
| 103 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]:
_A : int = parent
_A : Optional[Any] = batch_size
_A : str = image_size
_A : Tuple = patch_size
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : Any = num_heads
_A : Any = window_size
_A : int = mlp_ratio
_A : Any = qkv_bias
_A : Union[str, Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Dict = drop_path_rate
_A : List[Any] = hidden_act
_A : Any = use_absolute_embeddings
_A : Optional[int] = patch_norm
_A : Tuple = layer_norm_eps
_A : List[str] = initializer_range
_A : Optional[int] = is_training
_A : Optional[Any] = scope
_A : Optional[int] = use_labels
_A : Dict = type_sequence_label_size
_A : str = encoder_stride
_A : Optional[int] = out_features
_A : Optional[int] = out_indices
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = MaskFormerSwinModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : int = model(__lowerCamelCase)
_A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : Dict = model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4])
# verify ValueError
with self.parent.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = ["stem"]
_A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = MaskFormerSwinModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self) -> str:
return
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
def _lowerCamelCase ( self) -> Optional[int]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear))
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(__lowerCamelCase)
_A : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Any = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_A : Tuple = outputs.hidden_states
_A : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
# Swin has a different seq_length
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = 3
_A : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__lowerCamelCase):
_A : Optional[int] = 0
return t
def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}):
with torch.no_grad():
_A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase)
_A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple()
def recursive_check(__lowerCamelCase , __lowerCamelCase):
if isinstance(__lowerCamelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values()):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has"
F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}."
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase)
for model_class in self.all_model_classes:
_A : List[Any] = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
_A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = MaskFormerSwinModelTester(self)
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
_A : Optional[Any] = backbone_class(__lowerCamelCase)
backbone.to(__lowerCamelCase)
backbone.eval()
_A : List[Any] = backbone(**__lowerCamelCase)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __lowerCamelCase)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
_A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase)
self.assertIsNotNone(outputs.hidden_states)
self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_A , _A , _A : List[str] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
_A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase)
self.assertIsNotNone(outputs.attentions)
| 11 | 0 |
'''simple docstring'''
from math import factorial
def _A ( A__ = 100 ):
"""simple docstring"""
    return sum(int(x ) for x in str(factorial(A__ ) ) )
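# Quick sanity check (added as an illustrative sketch, not part of the original snippet):
# 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so the digit sum of 10! is 27.
assert _A(10) == 27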
if __name__ == "__main__":
    print(_A(int(input('''Enter the Number: ''').strip())))
| 104 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( stress : float , tangential_force : float , area : float , ) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
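# Illustrative usage (a sketch added for clarity, not part of the original snippet): passing
# stress as the single zero makes the function solve for it, and a 100 N tangential force
# over an area of 5 m^2 gives a shear stress of 100 / 5 = 20.
assert _SCREAMING_SNAKE_CASE(stress=0, tangential_force=100, area=5) == ("stress", 20.0)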
| 105 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] ):
_A , _A : Any = image.size
_A , _A : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_A : List[str] = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
_A : Any = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0
_A : Optional[Any] = image[None].transpose(0 , 3 , 1 , 2 )
_A : Union[str, Any] = torch.from_numpy(UpperCamelCase__ )
return 2.0 * image - 1.0
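# (Explanatory note added for clarity, not part of the original pipeline code: `preprocess`
#  is meant to round each side down to the nearest multiple of 32 -- e.g. a 520x390 input
#  becomes 512x384 -- and to map pixel values from [0, 255] into the [-1.0, 1.0] range that
#  the latent upscaler's UNet and VQ-VAE expect.)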
class lowerCAmelCase__ ( a):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase)
@torch.no_grad()
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Tuple = 1
elif isinstance(__lowerCamelCase , torch.Tensor):
_A : Union[str, Any] = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}")
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Union[str, Any] = preprocess(__lowerCamelCase)
_A , _A : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
_A : str = next(self.unet.parameters()).dtype
_A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase)
_A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device)
_A : Any = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_A : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_A : Optional[int] = {}
if accepts_eta:
_A : List[Any] = eta
for t in self.progress_bar(__lowerCamelCase):
# concat latents and low resolution image in the channel dimension.
_A : List[Any] = torch.cat([latents, image] , dim=1)
_A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
# predict the noise residual
_A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample
# compute the previous noisy sample x_t -> x_t-1
_A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample
# decode the image latents with the VQVAE
_A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample
_A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0)
_A : Tuple = image / 2 + 0.5
_A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_A : Optional[int] = self.numpy_to_pil(__lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase)
| 11 | 0 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( A_ ):
    rows = len(A_ )
    columns = len(A_[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if A_[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = A_[col][row] / A_[row][row]
                for i in range(row , columns ):
                    A_[col][i] -= multiplier * A_[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if A_[i][row] != 0:
                    A_[row], A_[i] = A_[i], A_[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    A_[i][row] = A_[i][rank]
                # Reduce the row pointer by one to stay on the same row
                row -= 1
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
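# Illustrative checks (added as a sketch, not part of the original snippet): the second row of
# [[1, 2], [2, 4]] is a multiple of the first, so its rank is 1, while the 2x2 identity matrix
# has full rank 2.
assert __SCREAMING_SNAKE_CASE([[1.0, 2.0], [2.0, 4.0]]) == 1
assert __SCREAMING_SNAKE_CASE([[1.0, 0.0], [0.0, 1.0]]) == 2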
| 106 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( a , a , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VQModel
__SCREAMING_SNAKE_CASE = "sample"
@property
def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]:
_A : Optional[int] = 4
_A : Tuple = 3
_A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase)
return {"sample": image}
@property
def _lowerCamelCase ( self) -> int:
return (3, 3_2, 3_2)
@property
def _lowerCamelCase ( self) -> List[Any]:
return (3, 3_2, 3_2)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[Any] = {
"block_out_channels": [3_2, 6_4],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_A : int = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> Any:
pass
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(__lowerCamelCase)
_A : str = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(__lowerCamelCase).eval()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
_A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
_A : Optional[int] = image.to(__lowerCamelCase)
with torch.no_grad():
_A : List[str] = model(__lowerCamelCase).sample
_A : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3])
# fmt: on
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
| 11 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__lowerCAmelCase : List[str] = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : str=7 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Dict=18 , __lowerCamelCase : Dict=30 , __lowerCamelCase : List[Any]=4_00 , __lowerCamelCase : Any=None , __lowerCamelCase : List[str]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]=None , ) -> Optional[int]:
a = size if size is not None else {"height": 20, "width": 20}
a = parent
a = batch_size
a = num_channels
a = image_size
a = min_resolution
a = max_resolution
a = size
a = do_normalize
a = do_convert_rgb
a = [5_12, 10_24, 20_48, 40_96]
a = patch_size if patch_size is not None else {"height": 16, "width": 16}
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
a = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
a = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = PixaStructImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = PixaStructImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : int ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : str ) -> Optional[int]:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_convert_rgb" ) )
def __UpperCAmelCase ( self : List[str] ) -> List[Any]:
a = self.image_processor_tester.prepare_dummy_image()
a = self.image_processing_class(**self.image_processor_dict )
a = 20_48
a = image_processor(__lowerCamelCase , return_tensors="pt" , max_patches=__lowerCamelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_606 ) , atol=1e-3 , rtol=1e-3 ) )
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
# Initialize image_processor
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a = image_processor(
__lowerCamelCase , return_tensors="pt" , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
# Initialize image_processor
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
a = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__lowerCamelCase ):
a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__lowerCamelCase ).flattened_patches
a = "Hello"
a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__lowerCamelCase , header_text=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a = image_processor(
__lowerCamelCase , return_tensors="pt" , max_patches=__lowerCamelCase , header_text=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
# Initialize image_processor
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a = image_processor(
__lowerCamelCase , return_tensors="pt" , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __UpperCAmelCase ( self : int ) -> str:
# Initialize image_processor
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a = image_processor(
__lowerCamelCase , return_tensors="pt" , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = PixaStructImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a = PixaStructImageProcessingTester(self , num_channels=4 )
a = 3
@property
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Any ) -> str:
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_convert_rgb" ) )
def __UpperCAmelCase ( self : Dict ) -> int:
# Initialize image_processor
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
a = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
a = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
a = image_processor(
__lowerCamelCase , return_tensors="pt" , max_patches=__lowerCamelCase ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 107 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Any = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
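# (Usage sketch added for illustration, not part of the original file: the upstream class this
#  file mirrors, transformers.MBartTokenizerFast, is typically used as
#      tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#      batch = tok(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
#  where the language codes select the prefix/suffix special tokens handled above.)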
| 11 | 0 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , *snake_case__ , snake_case__=None , snake_case__=None , **snake_case__ ):
"""simple docstring"""
super().__init__(*snake_case__ , **snake_case__ )
lowerCAmelCase : Tuple = eval_examples
lowerCAmelCase : int = post_process_function
def lowercase__ ( self , snake_case__ = None , snake_case__=None , snake_case__ = None , snake_case__ = "eval" , **snake_case__ , ):
"""simple docstring"""
lowerCAmelCase : List[Any] = gen_kwargs.copy()
lowerCAmelCase : Dict = (
gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
)
lowerCAmelCase : str = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
)
lowerCAmelCase : List[str] = gen_kwargs
lowerCAmelCase : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCAmelCase : Dict = self.get_eval_dataloader(snake_case__ )
lowerCAmelCase : Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase : Any = self.compute_metrics
lowerCAmelCase : Dict = None
lowerCAmelCase : Optional[int] = time.time()
lowerCAmelCase : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase : List[Any] = eval_loop(
snake_case__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , )
finally:
lowerCAmelCase : Dict = compute_metrics
lowerCAmelCase : List[str] = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCAmelCase : Optional[int] = self.post_process_function(snake_case__ , snake_case__ , snake_case__ )
lowerCAmelCase : Union[str, Any] = self.compute_metrics(snake_case__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowerCAmelCase : str = metrics.pop(snake_case__ )
metrics.update(output.metrics )
else:
lowerCAmelCase : int = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(snake_case__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCAmelCase : Dict = self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case__ )
return metrics
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__ = "test" , **snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = gen_kwargs.copy()
lowerCAmelCase : Tuple = self.get_test_dataloader(snake_case__ )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase : List[Any] = self.compute_metrics
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Any = time.time()
lowerCAmelCase : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase : str = eval_loop(
snake_case__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case__ , metric_key_prefix=snake_case__ , )
finally:
lowerCAmelCase : Optional[Any] = compute_metrics
lowerCAmelCase : int = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
snake_case__ , snake_case__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCAmelCase : Any = self.post_process_function(snake_case__ , snake_case__ , snake_case__ , "predict" )
lowerCAmelCase : Union[str, Any] = self.compute_metrics(snake_case__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowerCAmelCase : Optional[int] = metrics.pop(snake_case__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case__ )
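# (Usage sketch added for illustration, not part of the original file: a trainer built from this
#  class is typically driven as
#      metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")
#  so that generation kwargs such as max_length / num_beams flow into the gen_kwargs handling
#  implemented above; the 128 / 4 values here are arbitrary examples.)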
| 108 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
lowerCAmelCase__ = '</w>'
lowerCAmelCase__ = '@@ '
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] ):
    pairs = set()
    prev_char = UpperCamelCase__[0]
    for char in UpperCamelCase__[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
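# Illustrative example (added for clarity, not part of the original file): for the symbol
# sequence ("l", "o", "w", "</w>") the adjacent pairs are ("l", "o"), ("o", "w") and
# ("w", "</w>"); these are the pairs whose BPE merge ranks are looked up below.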
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_A : Dict = do_lower_case
with open(__lowerCamelCase , encoding="utf-8") as vocab_handle:
_A : Optional[int] = json.load(__lowerCamelCase)
_A : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_A : Optional[Any] = None
_A : Tuple = None
else:
with open(__lowerCamelCase , encoding="utf-8") as merges_handle:
_A : Optional[int] = merges_handle.read().split("\n")[:-1]
_A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges]
_A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : List[Any] = {}
@property
def _lowerCamelCase ( self) -> int:
return len(self.decoder)
def _lowerCamelCase ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A : int = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
_A , _A : Optional[int] = bigram
_A : int = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : str = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : str = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : List[str] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : List[Any] = get_pairs(__lowerCamelCase)
_A : Tuple = " ".join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_A : List[str] = "\n" + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_A : int = word.replace(__lowerCamelCase , "")
_A : int = word.replace(" " , __lowerCamelCase)
_A : Union[str, Any] = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : Optional[int] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return (vocab_file, merges_file)
| 11 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : List[str] = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-canny""" , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa )
UpperCAmelCase , UpperCAmelCase : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa )
UpperCAmelCase : List[str] = controlnet_params
UpperCAmelCase : int = """bird"""
UpperCAmelCase : Tuple = jax.device_count()
UpperCAmelCase : Optional[int] = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCAmelCase : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
UpperCAmelCase : Tuple = pipe.prepare_image_inputs([canny_image] * num_samples )
UpperCAmelCase : List[Any] = jax.random.PRNGKey(0 )
UpperCAmelCase : str = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
UpperCAmelCase : int = replicate(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = shard(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = shard(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = pipe(
prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
UpperCAmelCase : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCAmelCase : Optional[Any] = images[0, 253:256, 253:256, -1]
UpperCAmelCase : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase : Union[str, Any] = jnp.array(
[0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : str = FlaxControlNetModel.from_pretrained(
"""lllyasviel/sd-controlnet-openpose""" , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa )
UpperCAmelCase , UpperCAmelCase : int = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , controlnet=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa )
UpperCAmelCase : int = controlnet_params
UpperCAmelCase : List[Any] = """Chef in the kitchen"""
UpperCAmelCase : Dict = jax.device_count()
UpperCAmelCase : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples )
UpperCAmelCase : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
UpperCAmelCase : List[str] = pipe.prepare_image_inputs([pose_image] * num_samples )
UpperCAmelCase : Dict = jax.random.PRNGKey(0 )
UpperCAmelCase : Any = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
UpperCAmelCase : Tuple = replicate(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = shard(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = shard(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = pipe(
prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
UpperCAmelCase : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCAmelCase : int = images[0, 253:256, 253:256, -1]
UpperCAmelCase : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase : Any = jnp.array(
[[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
print(F"output_slice: {output_slice}" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 109 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "vit_mae"
def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int:
super().__init__(**__lowerCamelCase)
_A : int = hidden_size
_A : List[str] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : Optional[int] = hidden_act
_A : List[Any] = hidden_dropout_prob
_A : List[Any] = attention_probs_dropout_prob
_A : Union[str, Any] = initializer_range
_A : str = layer_norm_eps
_A : Any = image_size
_A : int = patch_size
_A : int = num_channels
_A : Dict = qkv_bias
_A : Tuple = decoder_num_attention_heads
_A : Tuple = decoder_hidden_size
_A : List[str] = decoder_num_hidden_layers
_A : Optional[Any] = decoder_intermediate_size
_A : List[str] = mask_ratio
_A : Union[str, Any] = norm_pix_loss
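# (Worked example added for illustration, not part of the original configuration file: with the
#  defaults above, a 224x224 image split into 16x16 patches yields (224 // 16) ** 2 = 196
#  patches, and mask_ratio=0.75 means 0.75 * 196 = 147 of them are masked during MAE
#  pre-training.)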
| 11 | 0 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _a ( UpperCamelCase__ ):
def __init__( self: Optional[Any] , UpperCamelCase_: pyspark.sql.DataFrame , UpperCamelCase_: Optional[NamedSplit] = None , UpperCamelCase_: Optional[Features] = None , UpperCamelCase_: bool = True , UpperCamelCase_: str = None , UpperCamelCase_: bool = False , UpperCamelCase_: str = None , UpperCamelCase_: bool = True , UpperCamelCase_: str = "arrow" , **UpperCamelCase_: Tuple , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , **UpperCamelCase_ , )
lowercase__ = load_from_cache_file
lowercase__ = file_format
lowercase__ = Spark(
df=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , working_dir=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCamelCase_ ( self: str ) -> Optional[int]:
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase__ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCamelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 110 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase__ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 0 |
from __future__ import annotations
a__: Union[str, Any] = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def UpperCamelCase__( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : list[list[int]] , )->Any:
A__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the reference grid
A__ = 1
A__ = [
[0 for col in range(len(grid[0] ) )] for row in range(len(UpperCamelCase__ ) )
] # the action grid
A__ = init[0]
A__ = init[1]
A__ = 0
A__ = g + heuristic[x][y] # cost from starting cell to destination cell
A__ = [[f, g, x, y]]
A__ = False # flag that is set when search is complete
A__ = False # flag set if we can't find expand
while not found and not resign:
if len(UpperCamelCase__ ) == 0:
raise ValueError('''Algorithm is unable to find solution''' )
else: # to choose the least costliest action so as to move closer to the goal
cell.sort()
cell.reverse()
A__ = cell.pop()
A__ = next_cell[2]
A__ = next_cell[3]
A__ = next_cell[1]
if x == goal[0] and y == goal[1]:
A__ = True
else:
for i in range(len(UpperCamelCase__ ) ): # to try out different valid actions
A__ = x + DIRECTIONS[i][0]
A__ = y + DIRECTIONS[i][1]
if xa >= 0 and xa < len(UpperCamelCase__ ) and ya >= 0 and ya < len(grid[0] ):
if closed[xa][ya] == 0 and grid[xa][ya] == 0:
A__ = g + cost
A__ = ga + heuristic[xa][ya]
cell.append([fa, ga, xa, ya] )
A__ = 1
A__ = i
A__ = []
A__ = goal[0]
A__ = goal[1]
invpath.append([x, y] ) # we get the reverse path from here
while x != init[0] or y != init[1]:
A__ = x - DIRECTIONS[action[x][y]][0]
A__ = y - DIRECTIONS[action[x][y]][1]
A__ = xa
A__ = ya
invpath.append([x, y] )
A__ = []
for i in range(len(UpperCamelCase__ ) ):
path.append(invpath[len(UpperCamelCase__ ) - 1 - i] )
return path, action
if __name__ == "__main__":
a__: Optional[int] = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
a__: List[str] = [0, 0]
# all coordinates are given in format [y,x]
a__: Tuple = [len(grid) - 1, len(grid[0]) - 1]
a__: Tuple = 1
# the cost map which pushes the path closer to the goal
a__: Dict = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
a__: Dict = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
a__: List[str] = 99
a__ , a__: Optional[int] = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 193 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
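#
# (Illustrative sketch, not part of the tool itself: the same cross product can be reproduced
# with itertools, e.g.
#
#     import itertools
#     dims = [v.split("|") for v in ["--tf32 0|--tf32 1", "--fp16 0|--fp16 1|--bf16 1"]]
#     for combo in itertools.product(*dims):
#         print(" ".join(combo))
#
# which prints exactly the 6 combinations listed above.)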
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to set each variation explicitly:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 times using --repeat-times 3, and it will report the averaged results.
#
# By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase__ = float('nan')
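# Tees everything written to stdout into a report file as well, stripping tqdm's carriage-return
# progress lines so the saved log stays readable.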
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase) -> Optional[Any]:
_A : List[Any] = sys.stdout
_A : str = open(__lowerCamelCase , "a")
def __getattr__( self , __lowerCamelCase) -> List[str]:
return getattr(self.stdout , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
self.stdout.write(__lowerCamelCase)
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M))
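# Reconstructs the command line this script was launched with (including relevant env vars such as
# CUDA_VISIBLE_DEVICES) and wraps it with shell line-continuations to at most max_width columns.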
def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ):
_A : Tuple = []
# deal with critical env vars
_A : Dict = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
_A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ )
if val is not None:
cmd.append(f"{key}={val}" )
# python executable (not always needed if the script is executable)
_A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(UpperCamelCase__ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_A : Tuple = []
_A : Dict = ""
while len(UpperCamelCase__ ) > 0:
current_line += f"{cmd.pop(0 )} "
if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(UpperCamelCase__ )
_A : Union[str, Any] = ""
return "\\\n".join(UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ):
# unwrap multi-line input
_A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
_A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += f" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
_A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
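# Runs the benchmarked command once for a given variation, saves its stdout/stderr under
# output_dir, and returns the requested metrics read from output_dir/all_results.json
# (NaN for the target metric if the run failed).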
def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
_A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
_A : Tuple = variation.replace(" " , "-" )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f:
f.write(result.stdout )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f:
_A : List[str] = json.load(UpperCamelCase__ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
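# Runs one variation the requested number of times, prints a compact ✓/✘ progress line, and
# returns the averaged metrics (NaN if every run failed).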
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ):
_A : Union[str, Any] = []
_A : Optional[int] = []
_A : Any = f"{id}: {variation:<{longest_variation_len}}"
_A : Dict = f"{preamble}: "
_A : Union[str, Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ):
_A : Optional[Any] = process_run_single(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : Optional[Any] = single_run_metrics[target_metric_key]
if not math.isnan(UpperCamelCase__ ):
metrics.append(UpperCamelCase__ )
results.append(UpperCamelCase__ )
outcome += "✓"
else:
outcome += "✘"
_A : str = f"\33[2K\r{outcome}"
if len(UpperCamelCase__ ) > 0:
_A : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
_A : Any = round(mean_metrics[target_metric_key] , 2 )
_A : Tuple = f"{outcome} {mean_target}"
if len(UpperCamelCase__ ) > 1:
results_str += f" {tuple(round(UpperCamelCase__ , 2 ) for x in results )}"
print(UpperCamelCase__ )
_A : Optional[int] = variation
return mean_metrics
else:
print(UpperCamelCase__ )
return {variation_key: variation, target_metric_key: nan}
def _UpperCAmelCase ():
_A : int = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
_A : Any = pd.DataFrame(UpperCamelCase__ )
_A : List[str] = "variation"
_A : List[Any] = "diff_%"
_A : int = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(UpperCamelCase__ ):
# as a fallback, use the minimal value as the sentinel
_A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(UpperCamelCase__ ):
_A : Optional[Any] = df.apply(
lambda UpperCamelCase__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
_A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols
# capitalize
_A : Tuple = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
_A : List[str] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "<br>" ) , axis="columns" )
_A : Union[str, Any] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "\n" ) , axis="columns" )
_A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
print("\n\n".join(UpperCamelCase__ ) )
def _UpperCAmelCase ():
_A : int = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , )
parser.add_argument(
"--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
_A : int = parser.parse_args()
_A : Union[str, Any] = args.output_dir
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
_A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ )
# split each dimension into its --foo variations
_A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) )
_A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations )
# split wanted keys
_A : str = args.report_metric_keys.split()
# capture prints into a log file for convenience
_A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
print(f"and this script's output is also piped into {report_fn}" )
_A : Tuple = Tee(UpperCamelCase__ )
print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" )
print(f"Base command: {' '.join(UpperCamelCase__ )}" )
_A : str = "variation"
_A : Union[str, Any] = []
for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ):
_A : Dict = base_cmd + variation.split()
results.append(
process_run(
id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) )
process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 11 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
_lowerCamelCase : List[Any] = ProphetNetTokenizer
_lowerCamelCase : Tuple = False
def lowercase ( self : Any ):
super().setUp()
_UpperCAmelCase = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowercase ( self : Union[str, Any] , snake_case_ : Tuple ):
_UpperCAmelCase = "UNwant\u00E9d,running"
_UpperCAmelCase = "unwanted, running"
return input_text, output_text
def lowercase ( self : Any ):
_UpperCAmelCase = self.tokenizer_class(self.vocab_file )
_UpperCAmelCase = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__lowerCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def lowercase ( self : int ):
_UpperCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = BasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowercase ( self : List[str] ):
_UpperCAmelCase = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def lowercase ( self : int ):
_UpperCAmelCase = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowercase ( self : int ):
_UpperCAmelCase = BasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def lowercase ( self : Any ):
_UpperCAmelCase = BasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase ( self : Dict ):
_UpperCAmelCase = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase ( self : Tuple ):
_UpperCAmelCase = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase ( self : Tuple ):
_UpperCAmelCase = BasicTokenizer(do_lower_case=__lowerCamelCase , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_UpperCAmelCase = {}
for i, token in enumerate(__lowerCamelCase ):
_UpperCAmelCase = i
_UpperCAmelCase = WordpieceTokenizer(vocab=__lowerCamelCase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def lowercase ( self : Optional[Any] ):
_UpperCAmelCase = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCAmelCase = [1_0_3_7, 2_1_4_6, 2_0_4_2_3, 2_0_0_5, 7_6_8_0, 7_8_4_9, 3_9_8_9, 1_0_1_2, 1_0_2]
_UpperCAmelCase = tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors="pt" )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
_UpperCAmelCase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def lowercase ( self : Union[str, Any] ):
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def lowercase ( self : str ):
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def lowercase ( self : int ):
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def lowercase ( self : Optional[int] ):
_UpperCAmelCase = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
_UpperCAmelCase = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
_UpperCAmelCase = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == text + [1_0_2]
assert encoded_pair == text + [1_0_2] + text_a + [1_0_2]
| 22 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__SCREAMING_SNAKE_CASE = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Overwrite the cached training and evaluation sets"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "The number of processes to use for the preprocessing."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowerCamelCase ( self) -> int:
if self.train_file is not None:
_A : Optional[int] = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_A : Dict = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
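# Data collator for multiple choice: flattens the per-choice features, pads them dynamically to
# the longest sequence in the batch, then reshapes everything back to (batch, num_choices, seq_len)
# and re-attaches the labels.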
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def __call__( self , __lowerCamelCase) -> str:
_A : List[Any] = "label" if "label" in features[0].keys() else "labels"
_A : Any = [feature.pop(__lowerCamelCase) for feature in features]
_A : Optional[int] = len(__lowerCamelCase)
_A : int = len(features[0]["input_ids"])
_A : Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase)] for feature in features
]
_A : str = list(chain(*__lowerCamelCase))
_A : Tuple = self.tokenizer.pad(
__lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
_A : Optional[int] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1) for k, v in batch.items()}
# Add back labels
_A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.intaa)
return batch
def _UpperCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A , _A , _A : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A , _A , _A : Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A : int = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_A : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_A : List[str] = {}
if data_args.train_file is not None:
_A : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
_A : Tuple = data_args.validation_file
_A : Union[str, Any] = data_args.train_file.split("." )[-1]
_A : List[str] = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_A : Union[str, Any] = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_A : str = [f"ending{i}" for i in range(4 )]
_A : Union[str, Any] = "sent1"
_A : str = "sent2"
if data_args.max_seq_length is None:
_A : Any = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
_A : Optional[Any] = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
_A : int = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
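    # Each example is expanded into 4 (context, ending) pairs, flattened for tokenization, and then
    # re-grouped into lists of 4 per example so every candidate ending is scored together.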
def preprocess_function(UpperCamelCase__ : List[Any] ):
_A : List[Any] = [[context] * 4 for context in examples[context_name]]
_A : Any = examples[question_header_name]
_A : Union[str, Any] = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
]
# Flatten out
_A : Dict = list(chain(*UpperCamelCase__ ) )
_A : List[Any] = list(chain(*UpperCamelCase__ ) )
# Tokenize
_A : str = tokenizer(
UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_A : Optional[int] = raw_datasets["train"]
if data_args.max_train_samples is not None:
_A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
_A : Any = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_A : Optional[int] = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_A : Optional[int] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
_A : Dict = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_A : List[str] = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_A : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(UpperCamelCase__ : Tuple ):
_A , _A : List[str] = eval_predictions
_A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_A : List[str] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
_A : Any = None
if training_args.resume_from_checkpoint is not None:
_A : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A : int = last_checkpoint
_A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_A : Optional[int] = train_result.metrics
_A : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
_A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("train" , UpperCamelCase__ )
trainer.save_metrics("train" , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_A : List[Any] = trainer.evaluate()
_A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
_A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("eval" , UpperCamelCase__ )
trainer.save_metrics("eval" , UpperCamelCase__ )
_A : Tuple = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 11 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A ={
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__A =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 163 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self) -> str:
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , )
assert hasattr(self , "env")
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
_A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
_A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , )
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)])
def _lowerCamelCase ( self , __lowerCamelCase) -> Any:
# create estimator
_A : Union[str, Any] = self.create_estimator(__lowerCamelCase)
# run training
estimator.fit()
# result dataframe
_A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
_A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_A : Optional[Any] = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__A : List[str] = TypeVar('''T''')
class _UpperCAmelCase ( Generic[T] ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 42 # Cache store of keys
SCREAMING_SNAKE_CASE_ : Any = 42 # References of the keys in cache
SCREAMING_SNAKE_CASE_ : List[str] = 10 # Maximum capacity of cache
def __init__( self : Any , A : List[Any] ) -> None:
lowercase_ : List[Any] = deque()
lowercase_ : Union[str, Any] = set()
if not n:
lowercase_ : List[Any] = sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
lowercase_ : str = n
def A ( self : List[Any] , A : List[Any] ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowercase_ : Optional[Any] = self.dq_store.pop()
self.key_reference.remove(__lowerCamelCase )
else:
self.dq_store.remove(__lowerCamelCase )
self.dq_store.appendleft(__lowerCamelCase )
self.key_reference.add(__lowerCamelCase )
def A ( self : Any ) -> None:
for k in self.dq_store:
print(__lowerCamelCase )
def __repr__( self : Any ) -> str:
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Union[str, Any] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 33 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
__SCREAMING_SNAKE_CASE = "OwlViTImageProcessor"
__SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]:
_A : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
_A : List[Any] = kwargs.pop("feature_extractor")
_A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(__lowerCamelCase , __lowerCamelCase)
def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none.")
if text is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)):
_A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)]
elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase):
_A : Optional[Any] = []
# Maximum number of queries across batch
_A : str = max([len(__lowerCamelCase) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCamelCase) != max_num_queries:
_A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase))
_A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
encodings.append(__lowerCamelCase)
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
if return_tensors == "np":
_A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
_A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0)
_A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0)
else:
raise ValueError("Target return tensor type could not be returned")
_A : Optional[Any] = BatchEncoding()
_A : Tuple = input_ids
_A : Dict = attention_mask
if query_images is not None:
_A : Optional[Any] = BatchEncoding()
_A : List[str] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values
_A : Union[str, Any] = query_pixel_values
if images is not None:
_A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
if text is not None and images is not None:
_A : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_A : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str:
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]:
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@property
def _lowerCamelCase ( self) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
| 11 | 0 |
"""simple docstring"""
from math import factorial
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if successes > trials:
raise ValueError("successes must be lower or equal to trials" )
if trials < 0 or successes < 0:
raise ValueError("the function is defined for non-negative integers" )
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError("the function is defined for non-negative integers" )
if not 0 < prob < 1:
raise ValueError("prob has to be in range of 1 - 0" )
__lowerCAmelCase = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
__lowerCAmelCase = float(factorial(UpperCamelCase__ ) )
coefficient /= factorial(UpperCamelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
| 57 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
_A : Optional[int] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase)
def _lowerCamelCase ( self) -> int:
_A : Optional[int] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Dict:
_A : int = "sgugger/tiny-distilbert-classification"
_A : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = "sshleifer/tiny-gpt2"
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def _lowerCamelCase ( self) -> int:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Any = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Any:
_A : Union[str, Any] = "sshleifer/tiny-gpt2"
_A : Any = AutoConfig.from_pretrained(__lowerCamelCase)
# set architectures equal to `None`
_A : Dict = None
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : List[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> str:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : Tuple = "sshleifer/tinier_bart"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> str:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> int:
_A : int = "sshleifer/tinier_bart"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Dict:
_A : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> int:
_A : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = PyTorchBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
| 11 | 0 |
"""simple docstring"""
def interpolation_search(sorted_collection, item):
    """Search ``item`` in an ascending ``sorted_collection``; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant searching ``item`` between indices ``left`` and ``right``."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, left, right)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1
            )
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right
            )


def __assert_sorted(collection):
    """Raise ``ValueError`` if ``collection`` is not ascending sorted."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 132 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = NllbTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
_A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
_A : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , )
_A : int = vocab_file
_A : Optional[Any] = False if not self.vocab_file else True
_A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "eng_Latn"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : Tuple = [self.sep_token_id]
_A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : List[Any] = src_lang
_A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Tuple = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , __lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Tuple = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> str:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : List[str] = []
_A : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_A : Tuple = [self.cur_lang_code]
_A : Optional[Any] = [self.eos_token_id]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : Tuple = []
_A : Any = [self.eos_token_id, self.cur_lang_code]
else:
_A : Union[str, Any] = [self.cur_lang_code]
_A : str = [self.eos_token_id]
_A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : Dict = self.convert_ids_to_tokens(self.suffix_tokens)
_A : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
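

# Editor's sketch (not part of the original module): typical use of the fast NLLB
# tokenizer defined above. The checkpoint name is the public NLLB-200 distilled 600M
# model; the language codes can be any entries from FAIRSEQ_LANGUAGE_CODES.
def _nllb_tokenizer_example():
    from transformers import NllbTokenizerFast

    tok = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    # src_lang / tgt_lang drive the special language tokens added around the input ids.
    return tok("How are you today?", return_tensors="pt")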
| 11 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Dict = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __lowerCAmelCase ( lowerCAmelCase_ ):
"""simple docstring"""
A__ : str = '''funnel'''
A__ : int = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : str , _snake_case : Union[str, Any]=3_0522 , _snake_case : List[str]=[4, 4, 4] , _snake_case : int=None , _snake_case : int=2 , _snake_case : Tuple=768 , _snake_case : Any=12 , _snake_case : Tuple=64 , _snake_case : Union[str, Any]=3072 , _snake_case : Any="gelu_new" , _snake_case : Optional[Any]=0.1 , _snake_case : Any=0.1 , _snake_case : Optional[Any]=0.0 , _snake_case : Tuple=0.1 , _snake_case : Optional[Any]=None , _snake_case : Any=1E-9 , _snake_case : List[Any]="mean" , _snake_case : Optional[int]="relative_shift" , _snake_case : List[Any]=True , _snake_case : List[str]=True , _snake_case : Any=True , **_snake_case : Optional[int] , ):
__lowercase : int = vocab_size
__lowercase : Optional[Any] = block_sizes
__lowercase : Tuple = [1] * len(__lowerCamelCase ) if block_repeats is None else block_repeats
assert len(__lowerCamelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__lowercase : Optional[Any] = num_decoder_layers
__lowercase : List[str] = d_model
__lowercase : Dict = n_head
__lowercase : Optional[int] = d_head
__lowercase : List[Any] = d_inner
__lowercase : Any = hidden_act
__lowercase : List[Any] = hidden_dropout
__lowercase : Dict = attention_dropout
__lowercase : List[Any] = activation_dropout
__lowercase : List[Any] = initializer_range
__lowercase : str = initializer_std
__lowercase : Any = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'
__lowercase : Union[str, Any] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'
__lowercase : List[Any] = attention_type
__lowercase : Tuple = separate_cls
__lowercase : Tuple = truncate_seq
__lowercase : int = pool_q_only
super().__init__(**__lowerCamelCase )
@property
def snake_case_ ( self : List[Any] ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def snake_case_ ( self : Optional[int] , _snake_case : Union[str, Any] ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def snake_case_ ( self : str ):
return len(self.block_sizes )
@num_blocks.setter
def snake_case_ ( self : Optional[Any] , _snake_case : Tuple ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
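

# Editor's sketch (not in the original file): the derived properties above in action,
# assuming the standard `FunnelConfig` shipped with transformers behaves like the
# class defined here.
def _funnel_config_example():
    from transformers import FunnelConfig

    cfg = FunnelConfig(block_sizes=[4, 4, 4], num_decoder_layers=2)
    assert cfg.num_blocks == 3          # len(block_sizes)
    assert cfg.num_hidden_layers == 12  # sum(block_sizes)
    return cfg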
| 156 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under ``format_type`` and its ``aliases``."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type] ):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Remember that ``format_type`` (and its ``aliases``) cannot be used, and why."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type] ):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
    _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
    _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
    _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
    _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, 'jax', aliases=[])
else:
    _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.')
    _register_unavailable_formatter(_jax_error, 'jax', aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """Resolve an alias such as 'pt' or 'np' to its canonical format type."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate the Formatter registered for ``format_type`` (aliases accepted)."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(t for t in _FORMAT_TYPES.keys() if t is not None)}, but got '{format_type}'" )
| 11 | 0 |
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=768 ):
super().__init__(__lowerCamelCase )
lowercase__: Union[str, Any] = proj_size
lowercase__: List[Any] = CLIPVisionModel(__lowerCamelCase )
lowercase__: Dict = PaintByExampleMapper(__lowerCamelCase )
lowercase__: Optional[Any] = nn.LayerNorm(config.hidden_size )
lowercase__: int = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
lowercase__: Union[str, Any] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=False ):
lowercase__: Optional[int] = self.model(pixel_values=__lowerCamelCase )
lowercase__: Optional[int] = clip_output.pooler_output
lowercase__: Dict = self.mapper(latent_states[:, None] )
lowercase__: List[Any] = self.final_layer_norm(__lowerCamelCase )
lowercase__: Dict = self.proj_out(__lowerCamelCase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class UpperCAmelCase (nn.Module ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
super().__init__()
lowercase__: Optional[int] = (config.num_hidden_layers + 1) // 5
lowercase__: Tuple = config.hidden_size
lowercase__: int = 1
lowercase__: int = nn.ModuleList(
[
BasicTransformerBlock(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , activation_fn='''gelu''' , attention_bias=__lowerCamelCase )
for _ in range(__lowerCamelCase )
] )
def _snake_case ( self , _UpperCAmelCase ):
for block in self.blocks:
lowercase__: Union[str, Any] = block(__lowerCamelCase )
return hidden_states
| 177 |
def trapezoidal_rule(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
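

# Editor's note: the exact integral of x^2 on [0, 1] is 1/3, and the approximation
# above converges toward it as `steps` grows. The point generator may skip the last
# interior point, so coarse step counts can under-estimate noticeably. Step counts
# below are illustrative.
def _trapezoidal_rule_demo() -> None:
    for steps in (10.0, 100.0, 1000.0):
        approx = trapezoidal_rule([0.0, 1.0], steps)
        print(f"steps={steps:>7}  approx={approx:.6f}  error={abs(approx - 1 / 3):.6f}")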
| 11 | 0 |
from string import ascii_uppercase
LETTER_TO_INDEX = {char: i for i, char in enumerate(ascii_uppercase)}
INDEX_TO_LETTER = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat ``key`` until it has the same length as ``message``."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt ``message`` (uppercase letters and spaces) with the expanded key."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (LETTER_TO_INDEX[letter] - LETTER_TO_INDEX[key_new[i]]) % 26
            i += 1
            encrypted += INDEX_TO_LETTER[x]
    return encrypted


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt ``cipher_text`` with the expanded key."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (LETTER_TO_INDEX[letter] + LETTER_TO_INDEX[key_new[i]] + 26) % 26
            i += 1
            or_txt += INDEX_TO_LETTER[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
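

# Editor's demo (illustrative message and key): encrypting and decrypting a second
# message with the helpers above should round-trip exactly.
def _cipher_roundtrip_demo() -> None:
    msg = "MEET AT DAWN"
    expanded = generate_key(msg, "LEMON")
    encrypted = cipher_text(msg, expanded)
    assert original_text(encrypted, expanded) == msg
    print(encrypted)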
| 15 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@parameterized.expand([(None,), ("foo.json",)])
def _lowerCamelCase ( self , __lowerCamelCase) -> List[str]:
_A : str = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
_A : Tuple = GenerationConfig.from_pretrained(__lowerCamelCase , config_name=__lowerCamelCase)
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.temperature , 0.7)
self.assertEqual(loaded_config.length_penalty , 1.0)
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0)
self.assertEqual(loaded_config.max_length , 2_0)
self.assertEqual(loaded_config.max_time , __lowerCamelCase)
def _lowerCamelCase ( self) -> Optional[int]:
_A : Optional[int] = AutoConfig.from_pretrained("gpt2")
_A : int = GenerationConfig.from_model_config(__lowerCamelCase)
_A : List[Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase)
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Optional[Any] = GenerationConfig()
_A : List[Any] = {
"max_new_tokens": 1_0_2_4,
"foo": "bar",
}
_A : List[str] = copy.deepcopy(__lowerCamelCase)
_A : int = generation_config.update(**__lowerCamelCase)
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCamelCase , __lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4)
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCamelCase , {"foo": "bar"})
def _lowerCamelCase ( self) -> Any:
_A : int = GenerationConfig()
_A : int = "bar"
with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
generation_config.save_pretrained(__lowerCamelCase)
_A : Any = GenerationConfig.from_pretrained(__lowerCamelCase)
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar")
_A : Optional[Any] = GenerationConfig.from_model_config(__lowerCamelCase)
assert not hasattr(__lowerCamelCase , "foo") # no new kwargs should be initialized if from config
def _lowerCamelCase ( self) -> List[str]:
_A : Union[str, Any] = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0)
self.assertEqual(default_config.do_sample , __lowerCamelCase)
self.assertEqual(default_config.num_beams , 1)
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7)
self.assertEqual(config.do_sample , __lowerCamelCase)
self.assertEqual(config.num_beams , 1)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCamelCase)
_A : Optional[int] = GenerationConfig.from_pretrained(__lowerCamelCase , temperature=1.0)
self.assertEqual(loaded_config.temperature , 1.0)
self.assertEqual(loaded_config.do_sample , __lowerCamelCase)
self.assertEqual(loaded_config.num_beams , 1) # default value
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
@classmethod
def _lowerCamelCase ( cls) -> Optional[int]:
_A : Dict = TOKEN
HfFolder.save_token(__lowerCamelCase)
@classmethod
def _lowerCamelCase ( cls) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id="test-generation-config")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org")
except HTTPError:
pass
def _lowerCamelCase ( self) -> Any:
_A : Optional[int] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token)
_A : Union[str, Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="test-generation-config" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[Any] = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Union[str, Any] = GenerationConfig(
do_sample=__lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token)
_A : int = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCamelCase , repo_id="valid_org/test-generation-config-org" , push_to_hub=__lowerCamelCase , use_auth_token=self._token)
_A : Optional[int] = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase))
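

# Editor's sketch (not part of the original test suite): the local save/load round trip
# that the tests above exercise, written as a plain function.
def _generation_config_roundtrip_example():
    import tempfile

    from transformers import GenerationConfig

    config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        reloaded = GenerationConfig.from_pretrained(tmp_dir)
    assert reloaded.temperature == 0.7
    return reloaded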
| 11 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase__ = logging.getLogger(__name__)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = np.argmax(UpperCamelCase__ , axis=1 )
return np.sum(outputs == labels )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
with open(UpperCamelCase__ , encoding="""utf_8""" ) as f:
__a = csv.reader(UpperCamelCase__ )
__a = []
next(UpperCamelCase__ ) # skip the first line
for line in tqdm(UpperCamelCase__ ):
output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
__a = []
for dataset in encoded_datasets:
__a = len(UpperCamelCase__ )
__a = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
__a = np.zeros((n_batch, 2) , dtype=np.intaa )
__a = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
__a = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(UpperCamelCase__ ):
__a = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__a = with_conta
__a = with_conta
__a = len(UpperCamelCase__ ) - 1
__a = len(UpperCamelCase__ ) - 1
__a = with_conta
__a = with_conta
__a = mc_label
__a = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(UpperCamelCase__ ) for t in all_inputs ) )
return tensor_datasets
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=UpperCamelCase__ , default="""openai-gpt""" , help="""pretrained model name""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" )
parser.add_argument(
"""--output_dir""" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=UpperCamelCase__ , default="""""" )
parser.add_argument("""--eval_dataset""" , type=UpperCamelCase__ , default="""""" )
parser.add_argument("""--seed""" , type=UpperCamelCase__ , default=42 )
parser.add_argument("""--num_train_epochs""" , type=UpperCamelCase__ , default=3 )
parser.add_argument("""--train_batch_size""" , type=UpperCamelCase__ , default=8 )
parser.add_argument("""--eval_batch_size""" , type=UpperCamelCase__ , default=16 )
parser.add_argument("""--adam_epsilon""" , default=1e-8 , type=UpperCamelCase__ , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , type=UpperCamelCase__ , default=1 )
parser.add_argument(
"""--max_steps""" , default=-1 , type=UpperCamelCase__ , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=UpperCamelCase__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=UpperCamelCase__ , default=6.25e-5 )
parser.add_argument("""--warmup_steps""" , default=0 , type=UpperCamelCase__ , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--lr_schedule""" , type=UpperCamelCase__ , default="""warmup_linear""" )
parser.add_argument("""--weight_decay""" , type=UpperCamelCase__ , default=0.01 )
parser.add_argument("""--lm_coef""" , type=UpperCamelCase__ , default=0.9 )
parser.add_argument("""--n_valid""" , type=UpperCamelCase__ , default=374 )
parser.add_argument("""--server_ip""" , type=UpperCamelCase__ , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=UpperCamelCase__ , default="""""" , help="""Can be used for distant debugging.""" )
__a = parser.parse_args()
print(UpperCamelCase__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__a = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
__a = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(UpperCamelCase__ , UpperCamelCase__ ) )
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__a = ["_start_", "_delimiter_", "_classify_"]
__a = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(UpperCamelCase__ )
__a = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
__a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
model.to(UpperCamelCase__ )
# Load and encode the datasets
def tokenize_and_encode(_SCREAMING_SNAKE_CASE : int ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(UpperCamelCase__ ) )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return obj
return [tokenize_and_encode(UpperCamelCase__ ) for o in obj]
logger.info("""Encoding dataset...""" )
__a = load_rocstories_dataset(args.train_dataset )
__a = load_rocstories_dataset(args.eval_dataset )
__a = (train_dataset, eval_dataset)
__a = tokenize_and_encode(UpperCamelCase__ )
# Compute the max input length for the Transformer
__a = model.config.n_positions // 2 - 2
__a = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__a = min(UpperCamelCase__ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a = pre_process_datasets(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ )
__a = tensor_datasets[0], tensor_datasets[1]
__a = TensorDataset(*UpperCamelCase__ )
__a = RandomSampler(UpperCamelCase__ )
__a = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.train_batch_size )
__a = TensorDataset(*UpperCamelCase__ )
__a = SequentialSampler(UpperCamelCase__ )
__a = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a = args.max_steps
__a = args.max_steps // (len(UpperCamelCase__ ) // args.gradient_accumulation_steps) + 1
else:
__a = len(UpperCamelCase__ ) // args.gradient_accumulation_steps * args.num_train_epochs
__a = list(model.named_parameters() )
__a = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
__a = [
{
"params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], "weight_decay": 0.0},
]
__a = AdamW(UpperCamelCase__ , lr=args.learning_rate , eps=args.adam_epsilon )
__a = get_linear_schedule_with_warmup(
UpperCamelCase__ , num_warmup_steps=args.warmup_steps , num_training_steps=UpperCamelCase__ )
if args.do_train:
__a = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ):
__a = 0
__a = 0
__a = tqdm(UpperCamelCase__ , desc="""Training""" )
for step, batch in enumerate(UpperCamelCase__ ):
__a = tuple(t.to(UpperCamelCase__ ) for t in batch )
__a = batch
__a = model(UpperCamelCase__ , mc_token_ids=UpperCamelCase__ , lm_labels=UpperCamelCase__ , mc_labels=UpperCamelCase__ )
__a = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a = "Training loss: {:.2e} lr: {:.2e}".format(UpperCamelCase__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a = model.module if hasattr(UpperCamelCase__ , """module""" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a = os.path.join(args.output_dir , UpperCamelCase__ )
__a = os.path.join(args.output_dir , UpperCamelCase__ )
torch.save(model_to_save.state_dict() , UpperCamelCase__ )
model_to_save.config.to_json_file(UpperCamelCase__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__a = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(UpperCamelCase__ )
if args.do_eval:
model.eval()
__a = 0, 0
__a = 0, 0
for batch in tqdm(UpperCamelCase__ , desc="""Evaluating""" ):
__a = tuple(t.to(UpperCamelCase__ ) for t in batch )
__a = batch
with torch.no_grad():
__a = model(
UpperCamelCase__ , mc_token_ids=UpperCamelCase__ , lm_labels=UpperCamelCase__ , mc_labels=UpperCamelCase__ )
__a = mc_logits.detach().cpu().numpy()
__a = mc_labels.to("""cpu""" ).numpy()
__a = accuracy(UpperCamelCase__ , UpperCamelCase__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__a = eval_loss / nb_eval_steps
__a = eval_accuracy / nb_eval_examples
__a = tr_loss / nb_tr_steps if args.do_train else None
__a = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}
__a = os.path.join(args.output_dir , """eval_results.txt""" )
with open(UpperCamelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , UpperCamelCase__ , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
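

# Editor's sketch of what the accuracy helper at the top of this script computes:
# the count of argmax predictions that match the labels (values below are made up).
def _accuracy_example():
    import numpy as np

    logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
    labels = np.array([1, 0, 0])
    return int(np.sum(np.argmax(logits, axis=1) == labels))  # -> 2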
| 302 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=0.2 , __lowerCamelCase=0.2) -> str:
_A : Optional[int] = bp_numa
_A : Dict = bp_numa
_A : Tuple = bp_numa
_A : List[str] = conva_get[:2]
_A : Tuple = conva_get[2]
_A : Optional[int] = size_pa
_A : Optional[Any] = rate_w
_A : Optional[Any] = rate_t
_A : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
_A : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
_A : Any = -2 * np.random.rand(self.conva[1]) + 1
_A : Optional[int] = -2 * np.random.rand(self.num_bpa) + 1
_A : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# save model dict with pickle
_A : Dict = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb") as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase)
print(F"Model saved: {save_path}")
@classmethod
def _lowerCamelCase ( cls , __lowerCamelCase) -> Any:
# read saved model
with open(__lowerCamelCase , "rb") as f:
_A : Any = pickle.load(__lowerCamelCase) # noqa: S301
_A : Optional[int] = model_dic.get("conv1")
conv_get.append(model_dic.get("step_conv1"))
_A : str = model_dic.get("size_pooling1")
_A : List[str] = model_dic.get("num_bp1")
_A : Union[str, Any] = model_dic.get("num_bp2")
_A : List[Any] = model_dic.get("num_bp3")
_A : Dict = model_dic.get("rate_weight")
_A : List[Any] = model_dic.get("rate_thre")
# create model instance
_A : str = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# modify model parameter
_A : List[Any] = model_dic.get("w_conv1")
_A : Union[str, Any] = model_dic.get("wkj")
_A : str = model_dic.get("vji")
_A : List[str] = model_dic.get("thre_conv1")
_A : Optional[Any] = model_dic.get("thre_bp2")
_A : Dict = model_dic.get("thre_bp3")
return conv_ins
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
return 1 / (1 + np.exp(-1 * x))
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
return round(__lowerCamelCase , 3)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Union[str, Any]:
# convolution process
_A : Tuple = convs[0]
_A : Union[str, Any] = convs[1]
_A : List[Any] = np.shape(__lowerCamelCase)[0]
# get the data slice of original image data, data_focus
_A : Tuple = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase):
_A : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase)
# calculate the feature map of every single kernel, and saved as list of matrix
_A : Optional[Any] = []
_A : Optional[int] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(__lowerCamelCase):
_A : Optional[int] = []
for i_focus in range(len(__lowerCamelCase)):
_A : Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase))
_A : Optional[Any] = np.asmatrix(__lowerCamelCase).reshape(
__lowerCamelCase , __lowerCamelCase)
data_featuremap.append(__lowerCamelCase)
# expanding the data slice to One dimenssion
_A : Optional[Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase))
_A : Dict = np.asarray(__lowerCamelCase)
return focus_list, data_featuremap
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="average_pool") -> Dict:
# pooling process
_A : Optional[Any] = len(featuremaps[0])
_A : str = int(size_map / size_pooling)
_A : Optional[int] = []
for i_map in range(len(__lowerCamelCase)):
_A : int = featuremaps[i_map]
_A : Optional[int] = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase))
_A : Tuple = np.asmatrix(__lowerCamelCase).reshape(__lowerCamelCase , __lowerCamelCase)
featuremap_pooled.append(__lowerCamelCase)
return featuremap_pooled
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
# expanding three dimension data to one dimension list
_A : Tuple = []
for i in range(len(__lowerCamelCase)):
_A : Union[str, Any] = np.shape(data[i])
_A : List[Any] = data[i].reshape(1 , shapes[0] * shapes[1])
_A : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase)
_A : Optional[Any] = np.asarray(__lowerCamelCase)
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase) -> Union[str, Any]:
# expanding matrix to one dimension list
_A : List[Any] = np.asarray(__lowerCamelCase)
_A : Union[str, Any] = np.shape(__lowerCamelCase)
_A : Dict = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Dict = []
_A : Any = 0
for i_map in range(__lowerCamelCase):
_A : Union[str, Any] = np.ones((size_map, size_map))
for i in range(0 , __lowerCamelCase , __lowerCamelCase):
for j in range(0 , __lowerCamelCase , __lowerCamelCase):
_A : List[Any] = pd_pool[
i_pool
]
_A : Tuple = i_pool + 1
_A : Optional[Any] = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(__lowerCamelCase)
return pd_all
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=bool) -> Union[str, Any]:
# model traning
print("----------------------Start Training-------------------------")
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase)))
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase)))
_A : Tuple = 0
_A : Dict = []
_A : Optional[Any] = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
_A : Union[str, Any] = 0
print(F"-------------Learning Time {rp}--------------")
for p in range(len(__lowerCamelCase)):
# print('------------Learning Image: %d--------------'%p)
_A : str = np.asmatrix(datas_train[p])
_A : Union[str, Any] = np.asarray(datas_teach[p])
_A , _A : Any = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Optional[Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = np.shape(__lowerCamelCase)
_A : List[str] = self._expand(__lowerCamelCase)
_A : Tuple = data_bp_input
_A : int = np.dot(__lowerCamelCase , self.vji.T) - self.thre_bpa
_A : List[Any] = self.sig(__lowerCamelCase)
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.wkj.T) - self.thre_bpa
_A : List[str] = self.sig(__lowerCamelCase)
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
_A : int = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Optional[Any] = np.multiply(
np.dot(__lowerCamelCase , self.wkj) , np.multiply(__lowerCamelCase , (1 - bp_outa)))
_A : Union[str, Any] = np.dot(__lowerCamelCase , self.vji)
_A : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
_A : Dict = pd_conva_pooled.T.getA().tolist()
_A : Optional[Any] = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
_A : int = self._expand_mat(pd_conva_all[k_conv])
_A : Optional[int] = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
_A : Any = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# all connected layer
_A : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
_A : int = self.vji + pd_j_all.T * bp_outa * self.rate_weight
_A : Tuple = self.thre_bpa - pd_k_all * self.rate_thre
_A : List[str] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
_A : Optional[int] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
_A : Any = rp + 1
_A : Dict = error_count / patterns
all_mse.append(__lowerCamelCase)
def draw_error():
_A : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(__lowerCamelCase , "+-")
plt.plot(__lowerCamelCase , "r--")
plt.xlabel("Learning Times")
plt.ylabel("All_mse")
plt.grid(__lowerCamelCase , alpha=0.5)
plt.show()
print("------------------Training Complished---------------------")
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}"))
if draw_e:
draw_error()
return mse
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
# model predict
_A : Union[str, Any] = []
print("-------------------Start Testing-------------------------")
print((" - - Shape: Test_Data ", np.shape(__lowerCamelCase)))
for p in range(len(__lowerCamelCase)):
_A : int = np.asmatrix(datas_test[p])
_A , _A : List[Any] = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : str = self.pooling(__lowerCamelCase , self.size_poolinga)
_A : Optional[int] = self._expand(__lowerCamelCase)
_A : List[Any] = data_bp_input
_A : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
_A : int = self.sig(__lowerCamelCase)
_A : int = bp_outa * self.wkj.T - self.thre_bpa
_A : Optional[int] = self.sig(__lowerCamelCase)
produce_out.extend(bp_outa.getA().tolist())
_A : int = [list(map(self.do_round , __lowerCamelCase)) for each in produce_out]
return np.asarray(__lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
# return the data of image after convoluting process so we can check it out
_A : Optional[int] = np.asmatrix(__lowerCamelCase)
_A , _A : Tuple = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
_A : Union[str, Any] = self.pooling(__lowerCamelCase , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
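

# Editor's sketch of the average-pooling step used by the class above, on a tiny
# 4x4 feature map with a 2x2 window (values are illustrative).
def _average_pool_example():
    import numpy as np

    fmap = np.arange(16, dtype=float).reshape(4, 4)
    # group the map into 2x2 windows and average each window
    return fmap.reshape(2, 2, 2, 2).mean(axis=(1, 3))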
| 11 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A__ : int = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : int = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : Tuple = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
A__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase__ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase__ = object()
def _UpperCAmelCase (UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
_A : str = tuple((re.compile(x + "$" ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_A : Tuple = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def _UpperCAmelCase (UpperCamelCase__ : str ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def _UpperCAmelCase ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P("mp" , UpperCamelCase__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , "mp" )),
(("attention", "out_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , "mp" )),
(("mlp", "c_fc", "bias"), P("mp" )),
(("mlp", "c_proj", "kernel"), P("mp" , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def _UpperCAmelCase (UpperCamelCase__ : List[str] ):
_A : int = _get_partition_rules()
_A : Optional[int] = _replacement_rules(UpperCamelCase__ )
_A : Optional[int] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )}
_A : List[str] = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(UpperCamelCase__ ) )
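

# Editor's sketch of the matching rule used above: a rule is a tuple of regexes, each
# anchored with "$", that must match a run of consecutive components of the flattened
# parameter key. The key below is illustrative.
def _rule_match_example() -> bool:
    key = ("transformer", "h", "0", "mlp", "c_fc", "kernel")
    rule = ("mlp", "c_fc", "kernel")
    regexes = tuple(re.compile(x + "$") for x in rule)
    return any(
        all(r.match(k) for r, k in zip(regexes, key[i:]))
        for i in range(len(key) - len(rule) + 1)
    )  # -> True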
| 11 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
a__: Tuple = datasets.logging.get_logger(__name__)
a__: Dict = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
a__: Tuple = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
a__: int = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
a__: Optional[int] = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,homepage='''https://github.com/google-research/bleurt''',inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''',id='''sequence''' ),
'''references''': datasets.Value('''string''',id='''sequence''' ),
} ),codebase_urls=['''https://github.com/google-research/bleurt'''],reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''],)
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
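
# Illustrative usage (an assumed example: the metric is loaded through `datasets`, and any
# key of CHECKPOINT_URLS can be passed as the config name, e.g. "bleurt-tiny-128"):
#
#   bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
#   results = bleurt.compute(predictions=["hello there"], references=["hello there"])
#   print(results["scores"])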
| 193 |
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
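
if __name__ == "__main__":
    # Illustrative usage (the inputs are arbitrary examples):
    print(snake_to_camel_case("some_multi_word_name"))                   # someMultiWordName
    print(snake_to_camel_case("some_multi_word_name", use_pascal=True))  # SomeMultiWordName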
| 11 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :int = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class A_ ( PretrainedConfig ):
_lowerCamelCase : List[str] = """beit"""
def __init__( self : Tuple , snake_case_ : List[str]=8_1_9_2 , snake_case_ : int=7_6_8 , snake_case_ : Tuple=1_2 , snake_case_ : Tuple=1_2 , snake_case_ : Union[str, Any]=3_0_7_2 , snake_case_ : Union[str, Any]="gelu" , snake_case_ : str=0.0 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : int=0.0_2 , snake_case_ : Optional[Any]=1e-12 , snake_case_ : int=2_2_4 , snake_case_ : Optional[Any]=1_6 , snake_case_ : Optional[int]=3 , snake_case_ : Optional[int]=False , snake_case_ : Any=False , snake_case_ : Dict=False , snake_case_ : Union[str, Any]=False , snake_case_ : Tuple=0.1 , snake_case_ : Dict=0.1 , snake_case_ : int=True , snake_case_ : int=[3, 5, 7, 1_1] , snake_case_ : str=[1, 2, 3, 6] , snake_case_ : str=True , snake_case_ : Union[str, Any]=0.4 , snake_case_ : int=2_5_6 , snake_case_ : Union[str, Any]=1 , snake_case_ : Tuple=False , snake_case_ : List[Any]=2_5_5 , **snake_case_ : Optional[int] , ):
super().__init__(**__lowerCamelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = use_mask_token
_UpperCAmelCase = use_absolute_position_embeddings
_UpperCAmelCase = use_relative_position_bias
_UpperCAmelCase = use_shared_relative_position_bias
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
_UpperCAmelCase = out_indices
_UpperCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = semantic_loss_ignore_index
class A_ ( OnnxConfig ):
_lowerCamelCase : Any = version.parse("""1.11""" )
@property
def lowercase ( self : str ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase ( self : Dict ):
return 1e-4
| 22 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Greedy: consider items by decreasing value-to-weight ratio.
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
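
if __name__ == "__main__":
    # Illustrative instance (arbitrary numbers): the greedy strategy takes the first two
    # items whole and two thirds of the last one, giving a total value of 240.0.
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))  # (240.0, [1, 1, 0.666...])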
| 11 | 0 |
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__A =logging.get_logger(__name__)
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : Dict = set()
UpperCAmelCase__ : Optional[int] = []
def parse_line(UpperCamelCase__ ):
for line in fp:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : str = line.decode("""UTF-8""" )
if "warnings summary (final)" in line:
continue
# This means we are outside the body of a warning
elif not line.startswith(""" """ ):
# process a single warning and move it to `selected_warnings`.
if len(UpperCamelCase__ ) > 0:
UpperCAmelCase__ : List[str] = "\n".join(UpperCamelCase__ )
# Only keep the warnings specified in `targets`
if any(f''': {x}: ''' in warning for x in targets ):
selected_warnings.add(UpperCamelCase__ )
buffer.clear()
continue
else:
UpperCAmelCase__ : Optional[int] = line.strip()
buffer.append(UpperCamelCase__ )
if from_gh:
for filename in os.listdir(UpperCamelCase__ ):
UpperCAmelCase__ : Tuple = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
if not os.path.isdir(UpperCamelCase__ ):
# read the file
if filename != "warnings.txt":
continue
with open(UpperCamelCase__ ) as fp:
parse_line(UpperCamelCase__ )
else:
try:
with zipfile.ZipFile(UpperCamelCase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCamelCase__ ):
# read the file
if filename != "warnings.txt":
continue
with z.open(UpperCamelCase__ ) as fp:
parse_line(UpperCamelCase__ )
except Exception:
logger.warning(
f'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
return selected_warnings
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase__ : int = set()
UpperCAmelCase__ : int = [os.path.join(UpperCamelCase__ , UpperCamelCase__ ) for p in os.listdir(UpperCamelCase__ ) if (p.endswith(""".zip""" ) or from_gh)]
for p in paths:
selected_warnings.update(extract_warnings_from_single_artifact(UpperCamelCase__ , UpperCamelCase__ ) )
return selected_warnings
if __name__ == "__main__":
def _UpperCamelCase ( UpperCamelCase__ ):
return values.split(""",""" )
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
# optional parameters
parser.add_argument(
'--targets',
default='DeprecationWarning,UserWarning,FutureWarning',
type=list_str,
help='Comma-separated list of target warning(s) which we want to extract.',
)
parser.add_argument(
'--from_gh',
action='store_true',
help='If running from a GitHub action workflow and collecting warnings from its artifacts.',
)
__A =parser.parse_args()
__A =args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__A =get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('=' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__A =extract_warnings(args.output_dir, args.targets)
__A =sorted(selected_warnings)
with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 163 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 11 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 33 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase=1_3 , __lowerCamelCase=3_2 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=1_6 , __lowerCamelCase=[1, 2, 1] , __lowerCamelCase=[2, 2, 4] , __lowerCamelCase=2 , __lowerCamelCase=2.0 , __lowerCamelCase=True , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.1 , __lowerCamelCase="gelu" , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-5 , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=1_0 , __lowerCamelCase=8 , __lowerCamelCase=["stage1", "stage2", "stage3"] , __lowerCamelCase=[1, 2, 3] , ) -> Optional[Any]:
_A : int = parent
_A : Optional[Any] = batch_size
_A : str = image_size
_A : Tuple = patch_size
_A : Tuple = num_channels
_A : Optional[int] = embed_dim
_A : Dict = depths
_A : Any = num_heads
_A : Any = window_size
_A : int = mlp_ratio
_A : Any = qkv_bias
_A : Union[str, Any] = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Dict = drop_path_rate
_A : List[Any] = hidden_act
_A : Any = use_absolute_embeddings
_A : Optional[int] = patch_norm
_A : Tuple = layer_norm_eps
_A : List[str] = initializer_range
_A : Optional[int] = is_training
_A : Optional[Any] = scope
_A : Optional[int] = use_labels
_A : Dict = type_sequence_label_size
_A : str = encoder_stride
_A : Optional[int] = out_features
_A : Optional[int] = out_indices
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_A : Optional[Any] = None
if self.use_labels:
_A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_A : Optional[int] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self) -> Union[str, Any]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> List[Any]:
_A : Dict = MaskFormerSwinModel(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : int = model(__lowerCamelCase)
_A : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
_A : List[str] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : Dict = model(__lowerCamelCase)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [1_3, 1_6, 1_6, 1_6])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4])
# verify ValueError
with self.parent.assertRaises(__lowerCamelCase):
_A : Union[str, Any] = ["stem"]
_A : Union[str, Any] = MaskFormerSwinBackbone(config=__lowerCamelCase)
def _lowerCamelCase ( self) -> Dict:
_A : Any = self.prepare_config_and_inputs()
_A , _A , _A : List[Any] = config_and_inputs
_A : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def _lowerCamelCase ( self) -> str:
_A : Union[str, Any] = MaskFormerSwinModelTester(self)
_A : Optional[int] = ConfigTester(self , config_class=__lowerCamelCase , embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
))
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self) -> str:
return
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase)
@unittest.skip("Swin does not use inputs_embeds")
def _lowerCamelCase ( self) -> str:
pass
@unittest.skip("Swin does not support feedforward chunking")
def _lowerCamelCase ( self) -> List[Any]:
pass
def _lowerCamelCase ( self) -> Optional[int]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(__lowerCamelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_A : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear))
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(__lowerCamelCase)
_A : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase)
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
def _lowerCamelCase ( self) -> Tuple:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Optional[int]:
_A : Any = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
with torch.no_grad():
_A : str = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase))
_A : Tuple = outputs.hidden_states
_A : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths) + 1)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
# Swin has a different seq_length
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self) -> Dict:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Optional[int] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
def _lowerCamelCase ( self) -> Tuple:
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[int] = 3
_A : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
_A : Optional[int] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
_A : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_A : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_A : List[Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_A : Union[str, Any] = True
self.check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
def _lowerCamelCase ( self) -> str:
pass
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__lowerCamelCase):
_A : Optional[int] = 0
return t
def check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase={}):
with torch.no_grad():
_A : Any = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase)
_A : int = model(**__lowerCamelCase , return_dict=__lowerCamelCase , **__lowerCamelCase).to_tuple()
def recursive_check(__lowerCamelCase , __lowerCamelCase):
if isinstance(__lowerCamelCase , (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCamelCase , __lowerCamelCase):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values()):
recursive_check(__lowerCamelCase , __lowerCamelCase)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__lowerCamelCase) , set_nan_tensor_to_zero(__lowerCamelCase) , atol=1e-5) , msg=(
"Tuple and dict output are not equal. Difference:"
F" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
F" {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}. Dict has"
F" `nan`: {torch.isnan(__lowerCamelCase).any()} and `inf`: {torch.isinf(__lowerCamelCase)}."
) , )
recursive_check(__lowerCamelCase , __lowerCamelCase)
for model_class in self.all_model_classes:
_A : List[Any] = model_class(__lowerCamelCase)
model.to(__lowerCamelCase)
model.eval()
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : Tuple = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase)
_A : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
_A : str = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
_A : Union[str, Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
_A : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase)
check_equivalence(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , {"output_hidden_states": True})
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , BackboneTesterMixin):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = MaskFormerSwinConfig
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = MaskFormerSwinModelTester(self)
def _lowerCamelCase ( self) -> Optional[Any]:
_A , _A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
_A : Optional[Any] = backbone_class(__lowerCamelCase)
backbone.to(__lowerCamelCase)
backbone.eval()
_A : List[Any] = backbone(**__lowerCamelCase)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __lowerCamelCase)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
_A : List[str] = backbone(**__lowerCamelCase , output_hidden_states=__lowerCamelCase)
self.assertIsNotNone(outputs.hidden_states)
self.assertTrue(len(outputs.hidden_states) , len(backbone.stage_names))
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_A , _A , _A : List[str] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
_A : int = backbone(**__lowerCamelCase , output_attentions=__lowerCamelCase)
self.assertIsNotNone(outputs.attentions)
| 11 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case ( self ):
__lowerCAmelCase = 1
__lowerCAmelCase = 3
__lowerCAmelCase = (32, 32)
__lowerCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__lowerCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , )
return CLIPTextModel(__lowerCamelCase )
def snake_case ( self ):
__lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.dummy_cond_unet_upscale
__lowerCAmelCase = DDPMScheduler()
__lowerCAmelCase = DDIMScheduler(prediction_type="v_prediction" )
__lowerCAmelCase = self.dummy_vae
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__lowerCAmelCase = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=3_50 , )
__lowerCAmelCase = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__lowerCAmelCase = "A painting of a squirrel eating a burger"
__lowerCAmelCase = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__lowerCAmelCase = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase = output.images
__lowerCAmelCase = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__lowerCAmelCase = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__lowerCamelCase , )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
__lowerCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__lowerCAmelCase = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
__lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.dummy_cond_unet_upscale
__lowerCAmelCase = DDPMScheduler()
__lowerCAmelCase = DDIMScheduler(prediction_type="v_prediction" )
__lowerCAmelCase = self.dummy_vae
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__lowerCAmelCase = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=3_50 , )
__lowerCAmelCase = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__lowerCAmelCase = "A painting of a squirrel eating a burger"
__lowerCAmelCase = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase = output.images
assert image.shape[0] == 2
__lowerCAmelCase = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
__lowerCAmelCase = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
__lowerCAmelCase = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def snake_case ( self ):
__lowerCAmelCase = self.dummy_cond_unet_upscale
__lowerCAmelCase = DDPMScheduler()
__lowerCAmelCase = DDIMScheduler(prediction_type="v_prediction" )
__lowerCAmelCase = self.dummy_vae
__lowerCAmelCase = self.dummy_text_encoder
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__lowerCAmelCase = unet.half()
__lowerCAmelCase = text_encoder.half()
# make sure here that pndm scheduler skips prk
__lowerCAmelCase = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=3_50 , )
__lowerCAmelCase = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
__lowerCAmelCase = "A painting of a squirrel eating a burger"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type="np" , ).images
__lowerCAmelCase = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
__lowerCAmelCase = "stabilityai/stable-diffusion-x4-upscaler"
__lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__lowerCAmelCase = "a cat sitting on a park bench"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type="np" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def snake_case ( self ):
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
__lowerCAmelCase = "stabilityai/stable-diffusion-x4-upscaler"
__lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
__lowerCAmelCase = "a cat sitting on a park bench"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type="np" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
__lowerCAmelCase = "stabilityai/stable-diffusion-x4-upscaler"
__lowerCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__lowerCAmelCase = "a cat sitting on a park bench"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , output_type="np" , )
__lowerCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 57 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 11 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a :Optional[int] = logging.get_logger(__name__)
a :Any = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __a (PretrainedConfig):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = """time_series_transformer"""
_SCREAMING_SNAKE_CASE :Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , _a = None , _a = None , _a = "student_t" , _a = "nll" , _a = 1 , _a = [1, 2, 3, 4, 5, 6, 7] , _a = "mean" , _a = 0 , _a = 0 , _a = 0 , _a = 0 , _a = None , _a = None , _a = 32 , _a = 32 , _a = 2 , _a = 2 , _a = 2 , _a = 2 , _a = True , _a = "gelu" , _a = 64 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 100 , _a = 0.02 , _a=True , **_a , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = prediction_length
SCREAMING_SNAKE_CASE__ : Tuple = context_length or prediction_length
SCREAMING_SNAKE_CASE__ : str = distribution_output
SCREAMING_SNAKE_CASE__ : Optional[int] = loss
SCREAMING_SNAKE_CASE__ : Optional[int] = input_size
SCREAMING_SNAKE_CASE__ : str = num_time_features
SCREAMING_SNAKE_CASE__ : str = lags_sequence
SCREAMING_SNAKE_CASE__ : Optional[Any] = scaling
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_dynamic_real_features
SCREAMING_SNAKE_CASE__ : int = num_static_real_features
SCREAMING_SNAKE_CASE__ : List[Any] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(__lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
SCREAMING_SNAKE_CASE__ : List[Any] = cardinality
else:
SCREAMING_SNAKE_CASE__ : int = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(__lowerCamelCase ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
SCREAMING_SNAKE_CASE__ : Dict = embedding_dimension
else:
SCREAMING_SNAKE_CASE__ : List[str] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
SCREAMING_SNAKE_CASE__ : Optional[int] = num_parallel_samples
# Transformer architecture configuration
SCREAMING_SNAKE_CASE__ : int = input_size * len(__lowerCamelCase ) + self._number_of_features
SCREAMING_SNAKE_CASE__ : List[str] = d_model
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Any = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_layers
SCREAMING_SNAKE_CASE__ : Tuple = dropout
SCREAMING_SNAKE_CASE__ : Tuple = attention_dropout
SCREAMING_SNAKE_CASE__ : str = activation_dropout
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = activation_function
SCREAMING_SNAKE_CASE__ : Any = init_std
SCREAMING_SNAKE_CASE__ : List[Any] = use_cache
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def _a ( self ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 132 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
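
# Illustrative usage of the preprocessing helper (the image size below is an example;
# any RGB PIL image works, its sides are rounded down to multiples of 32):
#
#   from PIL import Image
#   low_res = Image.new("RGB", (128, 128))
#   tensor = preprocess(low_res)  # torch tensor of shape (1, 3, 128, 128), scaled to [-1.0, 1.0]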
class lowerCAmelCase__ ( DiffusionPipeline):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
super().__init__()
self.register_modules(vqvae=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase)
@torch.no_grad()
def __call__( self , __lowerCamelCase = None , __lowerCamelCase = 1 , __lowerCamelCase = 1_0_0 , __lowerCamelCase = 0.0 , __lowerCamelCase = None , __lowerCamelCase = "pil" , __lowerCamelCase = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Tuple = 1
elif isinstance(__lowerCamelCase , torch.Tensor):
_A : Union[str, Any] = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__lowerCamelCase)}")
if isinstance(__lowerCamelCase , PIL.Image.Image):
_A : Union[str, Any] = preprocess(__lowerCamelCase)
_A , _A : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_A : Optional[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
_A : str = next(self.unet.parameters()).dtype
_A : Union[str, Any] = randn_tensor(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase)
_A : List[Any] = image.to(device=self.device , dtype=__lowerCamelCase)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__lowerCamelCase , device=self.device)
_A : Any = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_A : List[str] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A : str = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_A : Optional[int] = {}
if accepts_eta:
_A : List[Any] = eta
for t in self.progress_bar(__lowerCamelCase):
# concat latents and low resolution image in the channel dimension.
_A : List[Any] = torch.cat([latents, image] , dim=1)
_A : str = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase)
# predict the noise residual
_A : Any = self.unet(__lowerCamelCase , __lowerCamelCase).sample
# compute the previous noisy sample x_t -> x_t-1
_A : Optional[int] = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase).prev_sample
# decode the image latents with the VQVAE
_A : Union[str, Any] = self.vqvae.decode(__lowerCamelCase).sample
_A : Dict = torch.clamp(__lowerCamelCase , -1.0 , 1.0)
_A : Tuple = image / 2 + 0.5
_A : int = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_A : Optional[int] = self.numpy_to_pil(__lowerCamelCase)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCamelCase)
| 11 | 0 |
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
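
if __name__ == "__main__":
    # Illustrative example: a 10 kg body moving at 10 m/s carries 0.5 * 10 * 10**2 = 500 J.
    print(kinetic_energy(10, 10))  # 500.0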
| 156 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VQModel
__SCREAMING_SNAKE_CASE = "sample"
@property
def _lowerCamelCase ( self , __lowerCamelCase=(3_2, 3_2)) -> Optional[Any]:
_A : Optional[int] = 4
_A : Tuple = 3
_A : List[Any] = floats_tensor((batch_size, num_channels) + sizes).to(__lowerCamelCase)
return {"sample": image}
@property
def _lowerCamelCase ( self) -> int:
return (3, 3_2, 3_2)
@property
def _lowerCamelCase ( self) -> List[Any]:
return (3, 3_2, 3_2)
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : List[Any] = {
"block_out_channels": [3_2, 6_4],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
_A : int = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self) -> Union[str, Any]:
pass
def _lowerCamelCase ( self) -> Any:
pass
def _lowerCamelCase ( self) -> Any:
_A , _A : List[Any] = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__lowerCamelCase)
self.assertIsNotNone(__lowerCamelCase)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(__lowerCamelCase)
_A : str = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Optional[Any] = VQModel.from_pretrained("fusing/vqgan-dummy")
model.to(__lowerCamelCase).eval()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
_A : Tuple = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size)
_A : Optional[int] = image.to(__lowerCamelCase)
with torch.no_grad():
_A : List[str] = model(__lowerCamelCase).sample
_A : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_A : Optional[Any] = torch.tensor([-0.0_1_5_3, -0.4_0_4_4, -0.1_8_8_0, -0.5_1_6_1, -0.2_4_1_8, -0.4_0_7_2, -0.1_6_1_2, -0.0_6_3_3, -0.0_1_4_3])
# fmt: on
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-3))
| 11 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = "▁"
__A = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
__A = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
__A = {"vinai/bartpho-syllable": 1_0_2_4}
class UpperCAmelCase (PreTrainedTokenizer ):
"""simple docstring"""
_UpperCAmelCase :Dict = VOCAB_FILES_NAMES
_UpperCAmelCase :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase :Union[str, Any] = ["input_ids", "attention_mask"]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase = None , **_UpperCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it
lowercase__: Optional[int] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
lowercase__: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
lowercase__: Tuple = vocab_file
lowercase__: Tuple = monolingual_vocab_file
lowercase__: List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowercase__: List[Any] = {}
lowercase__: Optional[Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
lowercase__: Union[str, Any] = cnt
cnt += 1
with open(__lowerCamelCase , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
lowercase__: int = line.strip().split()[0]
lowercase__: str = len(self.fairseq_tokens_to_ids )
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
lowercase__: Optional[int] = len(self.fairseq_tokens_to_ids )
lowercase__: Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
lowercase__: Dict = self.__dict__.copy()
lowercase__: int = None
lowercase__: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _UpperCAmelCase ):
lowercase__: List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase__: Optional[int] = {}
lowercase__: List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__: int = [self.cls_token_id]
lowercase__: int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
lowercase__: str = [self.sep_token_id]
lowercase__: int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _snake_case ( self ):
return len(self.fairseq_ids_to_tokens )
def _snake_case ( self ):
lowercase__: Optional[Any] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , _UpperCAmelCase ):
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def _snake_case ( self , _UpperCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def _snake_case ( self , _UpperCAmelCase ):
return self.fairseq_ids_to_tokens[index]
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Optional[Any] = "".join(__lowerCamelCase ).replace(__lowerCamelCase , ''' ''' ).strip()
return out_string
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__: Union[str, Any] = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__: Optional[int] = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , '''wb''' ) as fi:
lowercase__: int = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(__lowerCamelCase )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
| 177 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
lowerCAmelCase__ = {
'facebook/mbart-large-en-ro': 10_24,
'facebook/mbart-large-cc25': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = MBartTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
_A : List[str] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
_A : Union[str, Any] = vocab_file
_A : int = False if not self.vocab_file else True
_A : Optional[int] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "en_XX"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : int = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : List[str] = [self.sep_token_id]
_A : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : str = src_lang
_A : Any = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Dict = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "en_XX" , __lowerCamelCase = None , __lowerCamelCase = "ro_RO" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Any = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> List[str]:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : int = self.convert_tokens_to_ids(__lowerCamelCase)
_A : int = []
_A : List[str] = [self.eos_token_id, self.cur_lang_code]
_A : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : str = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[int] = self.convert_tokens_to_ids(__lowerCamelCase)
_A : List[Any] = []
_A : str = [self.eos_token_id, self.cur_lang_code]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : int = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
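# Example usage sketch for the fast tokenizer above (run from user code, not from
# inside this module; assumes the public "facebook/mbart-large-en-ro" checkpoint
# can be downloaded):
#
#     from transformers import MBartTokenizerFast
#     tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#     enc = tok("UN Chief Says There Is No Military Solution in Syria")
#     tok.convert_ids_to_tokens(enc["input_ids"][-2:])   # ['</s>', 'en_XX']
#
# The trailing </s> followed by the language code mirrors what
# set_src_lang_special_tokens() above configures as the source suffix tokens.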
| 11 | 0 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_attention_mask=True ,use_token_type_ids=True ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=5_12 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_choices=4 ,):
        # Keep the test hyper-parameters on the tester so the methods below can read them back.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
        return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase_ ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
def UpperCamelCase_ ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = True
snake_case_ = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self : Dict ):
__A = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCamelCase_ ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
__A = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" ,from_pt=__lowerCamelCase )
__A = model(np.ones((1, 1) ) )
self.assertIsNotNone(__lowerCamelCase )
@require_flax
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : str ):
__A = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" ,from_pt=__lowerCamelCase )
__A = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ,dtype=jnp.intaa )
__A = model(__lowerCamelCase )[0]
__A = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) ,__lowerCamelCase )
# compare the actual values for a slice.
__A = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] ,dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] ,__lowerCamelCase ,atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : Tuple ):
__A = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" ,from_pt=__lowerCamelCase )
__A = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ,dtype=jnp.intaa )
__A = model(__lowerCamelCase )[0]
# compare the actual values for a slice.
__A = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] ,dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] ,__lowerCamelCase ,atol=1E-4 ) )
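# Rough sketch of exercising one of the Flax classes above outside the unittest
# harness (assumes jax/flax are installed and the "andreasmadsen/efficient_mlm_m0.40"
# weights used by the slow tests can be downloaded; shapes are illustrative only):
#
#     model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
#     out = model(np.ones((1, 11), dtype="i4"))
#     out.last_hidden_state.shape   # (1, 11, hidden_size)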
| 15 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = '</w>'  # end-of-word marker appended inside bpe() below
BPE_TOKEN_VOCAB = '@@ '  # continuation marker that is stripped again when decoding
def get_pairs(word ):
    """Return the set of adjacent symbol pairs found in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
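# Quick illustration: get_pairs(("h", "e", "l", "l", "o")) returns every adjacent
# symbol pair, i.e. {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, which is the
# candidate set the BPE merge loop below ranks against bpe_ranks.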
# Speech2Text2 has no max input length
lowerCAmelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 10_24}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="<pad>" , __lowerCamelCase="</s>" , __lowerCamelCase="<unk>" , __lowerCamelCase=False , __lowerCamelCase=None , **__lowerCamelCase , ) -> Optional[Any]:
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_A : Dict = do_lower_case
with open(__lowerCamelCase , encoding="utf-8") as vocab_handle:
_A : Optional[int] = json.load(__lowerCamelCase)
_A : Optional[Any] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
_A : Optional[Any] = None
_A : Tuple = None
else:
with open(__lowerCamelCase , encoding="utf-8") as merges_handle:
_A : Optional[int] = merges_handle.read().split("\n")[:-1]
_A : Union[str, Any] = [tuple(merge.split()[:2]) for merge in merges]
_A : Optional[int] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase))))
_A : List[Any] = {}
@property
def _lowerCamelCase ( self) -> int:
return len(self.decoder)
def _lowerCamelCase ( self) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_A : int = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_A : Any = min(__lowerCamelCase , key=lambda __lowerCamelCase: self.bpe_ranks.get(__lowerCamelCase , float("inf")))
if bigram not in self.bpe_ranks:
break
            first , second = bigram
_A : int = []
_A : str = 0
while i < len(__lowerCamelCase):
try:
_A : str = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_A : str = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_A : List[str] = tuple(__lowerCamelCase)
_A : List[str] = new_word
if len(__lowerCamelCase) == 1:
break
else:
_A : List[Any] = get_pairs(__lowerCamelCase)
_A : Tuple = " ".join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_A : List[str] = "\n" + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_A : int = word.replace(__lowerCamelCase , "")
_A : int = word.replace(" " , __lowerCamelCase)
_A : Union[str, Any] = word
return word
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding.")
if self.do_lower_case:
_A : List[Any] = text.lower()
_A : Optional[int] = text.split()
_A : List[str] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(" ")))
return split_tokens
def _lowerCamelCase ( self , __lowerCamelCase) -> int:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : List[str] = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
_A : str = " ".join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_A : int = "".join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
_A : Any = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(__lowerCamelCase , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + "\n")
_A : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , "w" , encoding="utf-8") as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCamelCase: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
_A : Optional[int] = token_index
writer.write(" ".join(__lowerCamelCase) + "\n")
index += 1
return (vocab_file, merges_file)
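# Toy, self-contained illustration of the merge-ranking idea the bpe() method above
# implements: repeatedly merge the adjacent pair with the lowest rank in the merges
# table. The tiny merge table below is invented for the example and is unrelated to
# any real checkpoint's merges.txt.
def _toy_bpe_merge_example():
    toy_ranks = {("l", "o"): 0, ("lo", "w"): 1}
    word = ["l", "o", "w", "e", "r"]
    while True:
        pairs = {(a, b) for a, b in zip(word, word[1:])}
        ranked = [p for p in pairs if p in toy_ranks]
        if not ranked:
            break
        first, second = min(ranked, key=toy_ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word  # ['low', 'e', 'r'] after applying ('l', 'o') and then ('lo', 'w')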
| 11 | 0 |
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
lowerCamelCase__ = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
lowerCamelCase__ = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
lowerCamelCase__ = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
lowerCamelCase__ = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
lowerCamelCase__ = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
lowerCamelCase__ = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    """Pick two random indices into SORTED_HANDS and derive the expected outcome."""
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
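# Note on the indexing trick above: (play >= oppo) + (play > oppo) evaluates to
# 0 for a loss, 1 for a tie and 2 for a win, which indexes ["Loss", "Tie", "Win"];
# e.g. play=1, oppo=5 -> "Loss", play=3, oppo=3 -> "Tie", play=7, oppo=2 -> "Win".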
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int = 100 ):
"""simple docstring"""
return (generate_random_hand() for _ in range(UpperCamelCase__ ))
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase__ )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase__ )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase__ )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
__a = PokerHand(UpperCamelCase__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase__ )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , UpperCamelCase__ )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase__ )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ ).compare_with(PokerHand(UpperCamelCase__ ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
assert PokerHand(UpperCamelCase__ ).compare_with(PokerHand(UpperCamelCase__ ) ) == expected
def lowerCAmelCase__ ( ):
"""simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )]
pokerhands.sort(reverse=UpperCamelCase__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = PokerHand("""2C 4S AS 3D 5C""" )
__a = True
__a = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowerCAmelCase__ ( ):
"""simple docstring"""
    answer = 0
    script_file = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_file , """poker_hands.txt""" )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
| 302 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = "vit_mae"
def __init__( self , __lowerCamelCase=7_6_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_2 , __lowerCamelCase=3_0_7_2 , __lowerCamelCase="gelu" , __lowerCamelCase=0.0 , __lowerCamelCase=0.0 , __lowerCamelCase=0.0_2 , __lowerCamelCase=1e-12 , __lowerCamelCase=2_2_4 , __lowerCamelCase=1_6 , __lowerCamelCase=3 , __lowerCamelCase=True , __lowerCamelCase=1_6 , __lowerCamelCase=5_1_2 , __lowerCamelCase=8 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=0.7_5 , __lowerCamelCase=False , **__lowerCamelCase , ) -> int:
super().__init__(**__lowerCamelCase)
_A : int = hidden_size
_A : List[str] = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : Optional[int] = hidden_act
_A : List[Any] = hidden_dropout_prob
_A : List[Any] = attention_probs_dropout_prob
_A : Union[str, Any] = initializer_range
_A : str = layer_norm_eps
_A : Any = image_size
_A : int = patch_size
_A : int = num_channels
_A : Dict = qkv_bias
_A : Tuple = decoder_num_attention_heads
_A : Tuple = decoder_hidden_size
_A : List[str] = decoder_num_hidden_layers
_A : Optional[Any] = decoder_intermediate_size
_A : List[str] = mask_ratio
_A : Union[str, Any] = norm_pix_loss
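# Rough arithmetic implied by the defaults above: with image_size=224 and
# patch_size=16 the encoder sees (224 // 16) ** 2 = 196 patches, and with
# mask_ratio=0.75 only about 196 * 0.25 = 49 of them stay visible to the encoder;
# the decoder then reconstructs the remaining 147 masked patches at pixel level.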
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
END = '#'


class Trie:
    def __init__( self ):
        # the trie is a nested dict of characters; END marks the end of an inserted word
        self._trie: dict = {}

    def insert_word( self , text ):
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word( self , prefix ):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie )

    def _elements( self , d ):
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )


trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie( string ):
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )


def main( ):
    print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 144 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
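# Illustration of the idea behind _LazyModule: the heavy torch/flax submodules are
# only imported when one of the names in _import_structure is first accessed. A
# simplified stand-in (not the actual transformers implementation) could rely on
# PEP 562 module-level __getattr__:
#
#     import importlib
#     _LAZY = {"SpeechEncoderDecoderModel": ".modeling_speech_encoder_decoder"}
#     def __getattr__(name):
#         if name in _LAZY:
#             return getattr(importlib.import_module(_LAZY[name], __package__), name)
#         raise AttributeError(name)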
| 11 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 193 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase__ = float('nan')
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , __lowerCamelCase) -> Optional[Any]:
_A : List[Any] = sys.stdout
_A : str = open(__lowerCamelCase , "a")
def __getattr__( self , __lowerCamelCase) -> List[str]:
return getattr(self.stdout , __lowerCamelCase)
def _lowerCamelCase ( self , __lowerCamelCase) -> str:
self.stdout.write(__lowerCamelCase)
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , __lowerCamelCase , 0 , re.M))
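# Hypothetical usage of the Tee helper above: everything printed afterwards goes both
# to the console and to the given file, with tqdm's control codes stripped from the
# file copy by the regex in write().
#
#     sys.stdout = Tee("benchmark-report.txt")
#     print("this line reaches both the terminal and benchmark-report.txt")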
def _UpperCAmelCase (UpperCamelCase__ : str=80 , UpperCamelCase__ : Tuple=False ):
_A : Tuple = []
# deal with critical env vars
_A : Dict = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
_A : Optional[int] = os.environ.get(UpperCamelCase__ , UpperCamelCase__ )
if val is not None:
cmd.append(f"{key}={val}" )
# python executable (not always needed if the script is executable)
_A : Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(UpperCamelCase__ )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
_A : Tuple = []
_A : Dict = ""
while len(UpperCamelCase__ ) > 0:
current_line += f"{cmd.pop(0 )} "
if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(UpperCamelCase__ )
_A : Union[str, Any] = ""
return "\\\n".join(UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple ):
# unwrap multi-line input
_A : Union[str, Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
_A : int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += f" --output_dir {output_dir}"
# ensure we have --overwrite_output_dir
_A : int = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def _UpperCAmelCase (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ):
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
_A : Dict = subprocess.run(UpperCamelCase__ , capture_output=UpperCamelCase__ , text=UpperCamelCase__ )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
_A : Tuple = variation.replace(" " , "-" )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stdout.txt" , "w" ) as f:
f.write(result.stdout )
with open(Path(UpperCamelCase__ ) / f"log.{prefix}.stderr.txt" , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(f"{output_dir}/all_results.json" , "r" , encoding="utf-8" ) as f:
_A : List[str] = json.load(UpperCamelCase__ )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def _UpperCAmelCase (UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , ):
_A : Union[str, Any] = []
_A : Optional[int] = []
_A : Any = f"{id}: {variation:<{longest_variation_len}}"
_A : Dict = f"{preamble}: "
_A : Union[str, Any] = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(UpperCamelCase__ ) , desc=UpperCamelCase__ , leave=UpperCamelCase__ ):
_A : Optional[Any] = process_run_single(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_A : Optional[Any] = single_run_metrics[target_metric_key]
if not math.isnan(UpperCamelCase__ ):
metrics.append(UpperCamelCase__ )
results.append(UpperCamelCase__ )
outcome += "✓"
else:
outcome += "✘"
_A : str = f"\33[2K\r{outcome}"
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] , 2 )
        results_str = f"{outcome} {mean_target}"
        if len(results ) > 1:
            results_str += f" {tuple(round(x , 2 ) for x in results )}"
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
else:
print(UpperCamelCase__ )
return {variation_key: variation, target_metric_key: nan}
def _UpperCAmelCase ():
_A : int = torch.cuda.get_device_properties(torch.device("cuda" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def _UpperCAmelCase (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
_A : Any = pd.DataFrame(UpperCamelCase__ )
_A : List[str] = "variation"
_A : List[Any] = "diff_%"
_A : int = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
_A : int = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(UpperCamelCase__ ):
# as a fallback, use the minimal value as the sentinel
_A : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(UpperCamelCase__ ):
_A : Optional[Any] = df.apply(
lambda UpperCamelCase__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
_A : Union[str, Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
_A : Any = df.reindex(UpperCamelCase__ , axis="columns" ) # reorder cols
# capitalize
_A : Tuple = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
_A : List[str] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "<br>" ) , axis="columns" )
_A : Union[str, Any] = df.rename(lambda UpperCamelCase__ : c.replace("_" , "\n" ) , axis="columns" )
_A : Optional[int] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ , floatfmt=".2f" )]
print("\n\n".join(UpperCamelCase__ ) )
def _UpperCAmelCase ():
_A : int = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Base cmd" , )
parser.add_argument(
"--variations" , default=UpperCamelCase__ , type=UpperCamelCase__ , nargs="+" , required=UpperCamelCase__ , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=UpperCamelCase__ , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=UpperCamelCase__ , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=UpperCamelCase__ , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=UpperCamelCase__ , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
_A : int = parser.parse_args()
_A : Union[str, Any] = args.output_dir
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
_A : Tuple = get_base_command(UpperCamelCase__ , UpperCamelCase__ )
# split each dimension into its --foo variations
_A : Dict = [list(map(str.strip , re.split(r"\|" , UpperCamelCase__ ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
_A : Union[str, Any] = list(map(str.strip , map(" ".join , itertools.product(*UpperCamelCase__ ) ) ) )
_A : Union[str, Any] = max(len(UpperCamelCase__ ) for x in variations )
# split wanted keys
_A : str = args.report_metric_keys.split()
# capture prints into a log file for convenience
_A : Optional[int] = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
print(f"and this script's output is also piped into {report_fn}" )
    sys.stdout = Tee(report_fn )  # mirror everything printed below into the report file
print(f"\n*** Running {len(UpperCamelCase__ )} benchmarks:" )
print(f"Base command: {' '.join(UpperCamelCase__ )}" )
_A : str = "variation"
_A : Union[str, Any] = []
for id, variation in enumerate(tqdm(UpperCamelCase__ , desc="Total completion: " , leave=UpperCamelCase__ ) ):
_A : Dict = base_cmd + variation.split()
results.append(
process_run(
id + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.repeat_times , UpperCamelCase__ , args.verbose , ) )
process_results(UpperCamelCase__ , args.target_metric_key , UpperCamelCase__ , args.base_variation , UpperCamelCase__ )
if __name__ == "__main__":
main()
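# Standalone illustration of how the --variations dimensions are expanded inside
# main() above: a cartesian product over the per-dimension options (toy values only).
def _toy_variations_product():
    dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
    return [" ".join(v).strip() for v in itertools.product(*dims)]
    # -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
    #     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']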
| 11 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :Optional[Any] = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : List[Any] = """gpt_neo"""
_lowerCamelCase : Optional[Any] = ["""past_key_values"""]
_lowerCamelCase : List[str] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : str , snake_case_ : int=5_0_2_5_7 , snake_case_ : str=2_0_4_8 , snake_case_ : Tuple=2_0_4_8 , snake_case_ : List[str]=2_4 , snake_case_ : Dict=[[["global", "local"], 1_2]] , snake_case_ : List[str]=1_6 , snake_case_ : Dict=None , snake_case_ : Dict=2_5_6 , snake_case_ : int="gelu_new" , snake_case_ : Union[str, Any]=0.0 , snake_case_ : Optional[Any]=0.0 , snake_case_ : Optional[Any]=0.0 , snake_case_ : int=0.1 , snake_case_ : Optional[int]=1e-5 , snake_case_ : Tuple=0.0_2 , snake_case_ : Dict=True , snake_case_ : str=5_0_2_5_6 , snake_case_ : Dict=5_0_2_5_6 , **snake_case_ : Tuple , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_layers
_UpperCAmelCase = num_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = window_size
_UpperCAmelCase = activation_function
_UpperCAmelCase = resid_dropout
_UpperCAmelCase = embed_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = classifier_dropout
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = attention_types
_UpperCAmelCase = self.expand_attention_types_params(__lowerCamelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.attention_layers)` == `config.num_layers` "
f'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
f'`config.num_layers = {self.num_layers}`. '
"`config.attention_layers` is prepared using `config.attention_types`. "
"Please verify the value of `config.attention_types` argument." )
super().__init__(bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
@staticmethod
def lowercase ( snake_case_ : Optional[int] ):
_UpperCAmelCase = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
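    # Worked example of the static helper above, which expands `attention_types`
    # into one attention flavour per layer: [[["global", "local"], 2]] becomes
    # ["global", "local", "global", "local"]; the default [[["global", "local"], 12]]
    # therefore yields an alternating pattern for a 24-layer model.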
def UpperCAmelCase_ ( __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : Optional[Any] , __lowercase : Optional[int] ) -> List[str]:
'''simple docstring'''
import torch
_UpperCAmelCase = input.size()
_UpperCAmelCase = len(UpperCamelCase__ )
_UpperCAmelCase = shape[dimension]
_UpperCAmelCase = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode="floor" ) + 1
_UpperCAmelCase = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None]
_UpperCAmelCase = [slice(UpperCamelCase__ )] * rank
_UpperCAmelCase = indices
_UpperCAmelCase = input[s]
_UpperCAmelCase = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase__ )
def UpperCAmelCase_ ( __lowercase : Any , __lowercase : Tuple ) -> Any:
'''simple docstring'''
import torch
_UpperCAmelCase = torch.arange(1 , UpperCamelCase__ )
_UpperCAmelCase = torch.remainder(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = remainders == 0
_UpperCAmelCase = candidates[divisor_indices]
_UpperCAmelCase = torch.max(UpperCamelCase__ )
return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode="floor" )
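# Example of what the helper above computes (requires torch): for seq_length=12 and
# window_size=4 the divisors of 12 among [1, 2, 3] are all three candidates, so it
# returns block_length=3 and num_blocks=4; for a prime seq_length such as 13 it falls
# back to block_length=1 and num_blocks=13.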
class A_ ( lowerCAmelCase_ ):
@property
def lowercase ( self : Dict ):
_UpperCAmelCase = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase , direction="inputs" )
_UpperCAmelCase = {0: "batch", 1: "past_sequence + sequence"}
else:
_UpperCAmelCase = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowercase ( self : Any ):
return self._config.num_heads
def lowercase ( self : int , snake_case_ : Optional[int] , snake_case_ : List[str] = -1 , snake_case_ : Any = -1 , snake_case_ : List[Any] = False , snake_case_ : List[str] = None , ):
_UpperCAmelCase = super(__lowerCamelCase , self ).generate_dummy_inputs(
__lowerCamelCase , batch_size=__lowerCamelCase , seq_length=__lowerCamelCase , is_pair=__lowerCamelCase , framework=__lowerCamelCase )
# We need to order the input in the way they appears in the forward()
_UpperCAmelCase = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_UpperCAmelCase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_UpperCAmelCase = seqlen + 2
_UpperCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
_UpperCAmelCase = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(self.num_layers )
]
_UpperCAmelCase = common_inputs["attention_mask"]
if self.use_past:
_UpperCAmelCase = ordered_inputs["attention_mask"].dtype
_UpperCAmelCase = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(__lowerCamelCase , __lowerCamelCase , dtype=__lowerCamelCase )] , dim=1 )
return ordered_inputs
@property
def lowercase ( self : Union[str, Any] ):
return 1_3
| 22 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
__SCREAMING_SNAKE_CASE = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = field(default=a , metadata={"help": "The input training data file (a text file)."})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "Overwrite the cached training and evaluation sets"})
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={"help": "The number of processes to use for the preprocessing."} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
__SCREAMING_SNAKE_CASE = field(
default=a , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowerCamelCase ( self) -> int:
if self.train_file is not None:
_A : Optional[int] = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_A : Dict = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCAmelCase__ :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def __call__( self , __lowerCamelCase) -> str:
_A : List[Any] = "label" if "label" in features[0].keys() else "labels"
_A : Any = [feature.pop(__lowerCamelCase) for feature in features]
_A : Optional[int] = len(__lowerCamelCase)
_A : int = len(features[0]["input_ids"])
_A : Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase)] for feature in features
]
_A : str = list(chain(*__lowerCamelCase))
_A : Tuple = self.tokenizer.pad(
__lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
_A : Optional[int] = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1) for k, v in batch.items()}
# Add back labels
_A : Optional[int] = torch.tensor(__lowerCamelCase , dtype=torch.intaa)
return batch
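# Toy, self-contained walk-through of the flatten -> pad -> un-flatten trick the
# collator's __call__ above uses (plain lists, no tokenizer; a pad id of 0 is
# assumed purely for this illustration).
def _toy_multiple_choice_collate(batch, pad_id=0):
    num_choices = len(batch[0]["input_ids"])
    flat = [ids for ex in batch for ids in ex["input_ids"]]
    max_len = max(len(ids) for ids in flat)
    padded = [ids + [pad_id] * (max_len - len(ids)) for ids in flat]
    return [padded[i : i + num_choices] for i in range(0, len(padded), num_choices)]
# _toy_multiple_choice_collate([{"input_ids": [[1, 2], [3, 4, 5]]}, {"input_ids": [[6], [7, 8]]}])
# -> [[[1, 2, 0], [3, 4, 5]], [[6, 0, 0], [7, 8, 0]]]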
def _UpperCAmelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , UpperCamelCase__ , UpperCamelCase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A : int = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase__ )
datasets.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.set_verbosity(UpperCamelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
_A : List[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_A : List[str] = {}
if data_args.train_file is not None:
_A : Optional[int] = data_args.train_file
if data_args.validation_file is not None:
_A : Tuple = data_args.validation_file
_A : Union[str, Any] = data_args.train_file.split("." )[-1]
_A : List[str] = load_dataset(
UpperCamelCase__ , data_files=UpperCamelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_A : Union[str, Any] = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_A : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_A : str = [f"ending{i}" for i in range(4 )]
_A : Union[str, Any] = "sent1"
_A : str = "sent2"
if data_args.max_seq_length is None:
_A : Any = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
_A : Optional[Any] = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
_A : int = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCamelCase__ : List[Any] ):
_A : List[Any] = [[context] * 4 for context in examples[context_name]]
_A : Any = examples[question_header_name]
_A : Union[str, Any] = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(UpperCamelCase__ )
]
# Flatten out
_A : Dict = list(chain(*UpperCamelCase__ ) )
_A : List[Any] = list(chain(*UpperCamelCase__ ) )
# Tokenize
_A : str = tokenizer(
UpperCamelCase__ , UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCamelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
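    # Apply the preprocessing above to the train/validation splits, optionally truncated for debugging.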
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_A : Optional[int] = raw_datasets["train"]
if data_args.max_train_samples is not None:
_A : Union[str, Any] = min(len(UpperCamelCase__ ) , data_args.max_train_samples )
_A : Any = train_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
_A : Optional[int] = train_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_A : Optional[int] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_A : str = min(len(UpperCamelCase__ ) , data_args.max_eval_samples )
_A : Dict = eval_dataset.select(range(UpperCamelCase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
_A : List[str] = eval_dataset.map(
UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_A : str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCamelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(UpperCamelCase__ : Tuple ):
_A , _A : List[str] = eval_predictions
_A : Optional[int] = np.argmax(UpperCamelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_A : List[str] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCamelCase__ , data_collator=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
_A : Any = None
if training_args.resume_from_checkpoint is not None:
_A : Optional[int] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A : int = last_checkpoint
_A : Any = trainer.train(resume_from_checkpoint=UpperCamelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_A : Optional[int] = train_result.metrics
_A : Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase__ )
)
_A : Tuple = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("train" , UpperCamelCase__ )
trainer.save_metrics("train" , UpperCamelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_A : List[Any] = trainer.evaluate()
_A : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase__ )
_A : Optional[Any] = min(UpperCamelCase__ , len(UpperCamelCase__ ) )
trainer.log_metrics("eval" , UpperCamelCase__ )
trainer.save_metrics("eval" , UpperCamelCase__ )
_A : Tuple = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCamelCase__ )
else:
trainer.create_model_card(**UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase__ : Optional[int] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 11 | 0 |
'''simple docstring'''
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__A =get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__A =5_00_03
__A =5_00_02
@require_sentencepiece
@require_tokenizers
class _snake_case ( a__ , unittest.TestCase ):
lowerCAmelCase :Union[str, Any] = PLBartTokenizer
lowerCAmelCase :Union[str, Any] = None
lowerCAmelCase :Optional[int] = False
def snake_case__ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Union[str, Any] = PLBartTokenizer(__lowerCamelCase , language_codes="""base""" , keep_accents=__lowerCamelCase)
tokenizer.save_pretrained(self.tmpdirname)
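    # Round-trip checks with the "base" language-code set: tokenize -> ids (with the fairseq offset)
    # -> tokens, unknown pieces map to <unk>, and the tail of the vocabulary holds the special
    # tokens (__java__, __python__, __en_XX__, <mask>).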
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = PLBartTokenizer(__lowerCamelCase , language_codes="""base""" , keep_accents=__lowerCamelCase)
UpperCAmelCase__ : List[Any] = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase__ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase__ : Optional[int] = tokenizer.convert_tokens_to_ids(__lowerCamelCase)
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase)
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
UpperCAmelCase__ : Tuple = tokenizer.vocab_size
UpperCAmelCase__ : int = [tokenizer.convert_ids_to_tokens(__lowerCamelCase) for x in range(end - 4 , __lowerCamelCase)]
self.assertListEqual(__lowerCamelCase , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""])
UpperCAmelCase__ : Tuple = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase__ : Dict = tokenizer(__lowerCamelCase).input_ids
self.assertEqual(
tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase) , __lowerCamelCase , )
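    # Same checks with language_codes="multi": the vocabulary tail now holds the seven
    # per-language codes (__java__ ... __go__) instead of the base set.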
def snake_case__ ( self):
UpperCAmelCase__ : str = PLBartTokenizer(__lowerCamelCase , language_codes="""multi""" , keep_accents=__lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = tokenizer.tokenize("""This is a test""")
self.assertListEqual(__lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase__ : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
UpperCAmelCase__ : List[Any] = tokenizer.convert_tokens_to_ids(__lowerCamelCase)
self.assertListEqual(
__lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase__ : str = tokenizer.convert_ids_to_tokens(__lowerCamelCase)
self.assertListEqual(
__lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
UpperCAmelCase__ : Dict = tokenizer.vocab_size
UpperCAmelCase__ : List[Any] = [tokenizer.convert_ids_to_tokens(__lowerCamelCase) for x in range(end - 7 , __lowerCamelCase)]
self.assertListEqual(
__lowerCamelCase , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""])
UpperCAmelCase__ : Tuple = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
UpperCAmelCase__ : Any = tokenizer(__lowerCamelCase).input_ids
self.assertEqual(
tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase) , __lowerCamelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
lowerCAmelCase :List[Any] = '''uclanlp/plbart-python-en_XX'''
lowerCAmelCase :Any = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
lowerCAmelCase :Union[str, Any] = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
lowerCAmelCase :Any = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def snake_case__ ( cls):
UpperCAmelCase__ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""")
UpperCAmelCase__ : str = 1
return cls
def snake_case__ ( self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_0001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_0002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_0003)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase)
def snake_case__ ( self):
self.assertIn(__lowerCamelCase , self.tokenizer.all_special_ids)
UpperCAmelCase__ : int = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
UpperCAmelCase__ : Dict = self.tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCamelCase)
self.assertEqual(__lowerCamelCase , __lowerCamelCase)
self.assertNotIn(self.tokenizer.eos_token , __lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , __lowerCamelCase)
UpperCAmelCase__ : str = 10
UpperCAmelCase__ : str = self.tokenizer(__lowerCamelCase , max_length=__lowerCamelCase , truncation=__lowerCamelCase).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , __lowerCamelCase)
self.assertEqual(len(__lowerCamelCase) , __lowerCamelCase)
def snake_case__ ( self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""]) , [5_0004, 5_0001])
def snake_case__ ( self):
UpperCAmelCase__ : Any = tempfile.mkdtemp()
UpperCAmelCase__ : Dict = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCamelCase)
UpperCAmelCase__ : int = PLBartTokenizer.from_pretrained(__lowerCamelCase)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCamelCase)
@require_torch
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , return_tensors="""pt""")
UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0] , __lowerCamelCase)
self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=len(self.expected_src_tokens) , return_tensors="""pt""" , )
UpperCAmelCase__ : Tuple = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id)
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase)
self.assertEqual((2, 26) , batch.input_ids.shape)
self.assertEqual((2, 26) , batch.attention_mask.shape)
UpperCAmelCase__ : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCamelCase)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.tokenizer(self.src_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=3 , return_tensors="""pt""")
UpperCAmelCase__ : Dict = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=10 , return_tensors="""pt""")
UpperCAmelCase__ : Any = targets["input_ids"]
UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(__lowerCamelCase , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def snake_case__ ( self):
UpperCAmelCase__ : str = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""")
self.assertEqual(
nested_simplify(__lowerCamelCase) , {
# A, test, EOS, en_XX
"""input_ids""": [[150, 242, 2, 5_0003]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_0001,
} , ) | 163 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because it should only be run when releasing a minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
])
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self) -> str:
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="utf-8" , check=__lowerCamelCase , )
assert hasattr(self , "env")
def _lowerCamelCase ( self , __lowerCamelCase) -> Tuple:
_A : Dict = F"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
_A : Optional[Any] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCamelCase , instance_count=__lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCamelCase , py_version="py36" , )
def _lowerCamelCase ( self , __lowerCamelCase) -> Optional[Any]:
TrainingJobAnalytics(__lowerCamelCase).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(2,)])
def _lowerCamelCase ( self , __lowerCamelCase) -> Any:
# create estimator
_A : Union[str, Any] = self.create_estimator(__lowerCamelCase)
# run training
estimator.fit()
# result dataframe
_A : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
_A : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
_A : Dict = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_A : Optional[Any] = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds" , 9_9_9_9_9_9)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , __lowerCamelCase)
| 11 | 0 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( _A ):
SCREAMING_SNAKE_CASE_ : Tuple = (UnCLIPScheduler,)
def A ( self : Tuple , **A : Any ) -> Any:
lowercase_ : Dict = {
"num_train_timesteps": 10_00,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**__lowerCamelCase )
return config
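    # Sweep individual scheduler config fields (timesteps, variance type, clipping,
    # prediction type, prev_timestep) through the common configuration/forward checks.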
def A ( self : List[str] ) -> Dict:
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__lowerCamelCase )
def A ( self : Union[str, Any] ) -> str:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowerCamelCase )
def A ( self : int ) -> Any:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCamelCase )
def A ( self : List[str] ) -> int:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__lowerCamelCase )
def A ( self : Optional[Any] ) -> List[Any]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__lowerCamelCase )
def A ( self : Optional[Any] ) -> Dict:
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__lowerCamelCase , prev_timestep=__lowerCamelCase )
def A ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ : List[Any] = self.scheduler_classes[0]
lowercase_ : Dict = self.get_scheduler_config(variance_type='''fixed_small_log''' )
lowercase_ : Optional[Any] = scheduler_class(**__lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0549625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.9994987 ) ) < 1e-5
def A ( self : Tuple ) -> str:
lowercase_ : List[Any] = self.scheduler_classes[0]
lowercase_ : Union[str, Any] = self.get_scheduler_config(variance_type='''learned_range''' )
lowercase_ : List[str] = scheduler_class(**__lowerCamelCase )
lowercase_ : Any = 0.5
assert scheduler._get_variance(1 , predicted_variance=__lowerCamelCase ) - -10.1_71_27_90 < 1e-5
assert scheduler._get_variance(4_87 , predicted_variance=__lowerCamelCase ) - -5.7998052 < 1e-5
assert scheduler._get_variance(9_99 , predicted_variance=__lowerCamelCase ) - -0.0010011 < 1e-5
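    # Full denoising loop over the default schedule: feed the dummy model's residuals through
    # scheduler.step and compare the summed/mean magnitude of the final sample to reference values.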
def A ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ : List[str] = self.scheduler_classes[0]
lowercase_ : Dict = self.get_scheduler_config()
lowercase_ : List[str] = scheduler_class(**__lowerCamelCase )
lowercase_ : Optional[int] = scheduler.timesteps
lowercase_ : Union[str, Any] = self.dummy_model()
lowercase_ : List[Any] = self.dummy_sample_deter
lowercase_ : Any = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
lowercase_ : Optional[Any] = model(__lowerCamelCase , __lowerCamelCase )
# 2. predict previous mean of sample x_t-1
lowercase_ : Optional[int] = scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , generator=__lowerCamelCase ).prev_sample
lowercase_ : List[Any] = pred_prev_sample
lowercase_ : str = torch.sum(torch.abs(__lowerCamelCase ) )
lowercase_ : Optional[Any] = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1e-2
assert abs(result_mean.item() - 0.3284743 ) < 1e-3
def A ( self : int ) -> Any:
lowercase_ : Optional[Any] = self.scheduler_classes[0]
lowercase_ : Union[str, Any] = self.get_scheduler_config()
lowercase_ : Dict = scheduler_class(**__lowerCamelCase )
scheduler.set_timesteps(25 )
lowercase_ : Dict = scheduler.timesteps
lowercase_ : List[Any] = self.dummy_model()
lowercase_ : Optional[int] = self.dummy_sample_deter
lowercase_ : str = torch.manual_seed(0 )
for i, t in enumerate(__lowerCamelCase ):
# 1. predict noise residual
lowercase_ : Optional[int] = model(__lowerCamelCase , __lowerCamelCase )
if i + 1 == timesteps.shape[0]:
lowercase_ : List[str] = None
else:
lowercase_ : int = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowercase_ : Any = scheduler.step(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , prev_timestep=__lowerCamelCase , generator=__lowerCamelCase ).prev_sample
lowercase_ : Dict = pred_prev_sample
lowercase_ : Dict = torch.sum(torch.abs(__lowerCamelCase ) )
lowercase_ : Any = torch.mean(torch.abs(__lowerCamelCase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1e-2
assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def A ( self : List[str] ) -> str:
pass
def A ( self : Optional[int] ) -> int:
pass
| 33 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["image_processor", "tokenizer"]
__SCREAMING_SNAKE_CASE = "OwlViTImageProcessor"
__SCREAMING_SNAKE_CASE = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase) -> Union[str, Any]:
_A : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCamelCase , )
_A : List[Any] = kwargs.pop("feature_extractor")
_A : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(__lowerCamelCase , __lowerCamelCase)
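    # Pads every sample in a text batch to the same number of query strings and tokenizes them,
    # and/or runs the image processor on query_images / images, returning a single BatchEncoding.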
def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="max_length" , __lowerCamelCase="np" , **__lowerCamelCase) -> Any:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none.")
if text is not None:
if isinstance(__lowerCamelCase , __lowerCamelCase) or (isinstance(__lowerCamelCase , __lowerCamelCase) and not isinstance(text[0] , __lowerCamelCase)):
_A : Union[str, Any] = [self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)]
elif isinstance(__lowerCamelCase , __lowerCamelCase) and isinstance(text[0] , __lowerCamelCase):
_A : Optional[Any] = []
# Maximum number of queries across batch
_A : str = max([len(__lowerCamelCase) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(__lowerCamelCase) != max_num_queries:
_A : Optional[int] = t + [" "] * (max_num_queries - len(__lowerCamelCase))
_A : List[Any] = self.tokenizer(__lowerCamelCase , padding=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
encodings.append(__lowerCamelCase)
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
if return_tensors == "np":
_A : Tuple = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_A : Optional[int] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Optional[int] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
_A : Optional[Any] = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0)
_A : Union[str, Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_A : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0)
_A : Tuple = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0)
else:
raise ValueError("Target return tensor type could not be returned")
_A : Optional[Any] = BatchEncoding()
_A : Tuple = input_ids
_A : Dict = attention_mask
if query_images is not None:
_A : Optional[Any] = BatchEncoding()
_A : List[str] = self.image_processor(
__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase).pixel_values
_A : Union[str, Any] = query_pixel_values
if images is not None:
_A : int = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
if text is not None and images is not None:
_A : Tuple = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_A : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase) , tensor_type=__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> str:
return self.image_processor.post_process(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> List[str]:
return self.image_processor.post_process_object_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> int:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self , *__lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase)
@property
def _lowerCamelCase ( self) -> int:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self) -> List[str]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCamelCase , )
return self.image_processor
| 11 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
A : int = {
"xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
"xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
"xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
"xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
"xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
"xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
"xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
"xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
"xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
"xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Any ="""xlm"""
__UpperCAmelCase : int ={
"""hidden_size""": """emb_dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
"""n_words""": """vocab_size""", # For backward compatibility
}
def __init__( self , __a=3_01_45 , __a=20_48 , __a=12 , __a=16 , __a=0.1 , __a=0.1 , __a=True , __a=False , __a=False , __a=False , __a=1 , __a=True , __a=5_12 , __a=20_48**-0.5 , __a=1e-1_2 , __a=0.0_2 , __a=0 , __a=1 , __a=2 , __a=3 , __a=5 , __a=True , __a="first" , __a=True , __a=None , __a=True , __a=0.1 , __a=5 , __a=5 , __a=0 , __a=0 , __a=2 , __a=0 , **__a , ):
__lowerCAmelCase = vocab_size
__lowerCAmelCase = emb_dim
__lowerCAmelCase = n_layers
__lowerCAmelCase = n_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = gelu_activation
__lowerCAmelCase = sinusoidal_embeddings
__lowerCAmelCase = causal
__lowerCAmelCase = asm
__lowerCAmelCase = n_langs
__lowerCAmelCase = use_lang_emb
__lowerCAmelCase = layer_norm_eps
__lowerCAmelCase = bos_index
__lowerCAmelCase = eos_index
__lowerCAmelCase = pad_index
__lowerCAmelCase = unk_index
__lowerCAmelCase = mask_index
__lowerCAmelCase = is_encoder
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = embed_init_std
__lowerCAmelCase = init_std
__lowerCAmelCase = summary_type
__lowerCAmelCase = summary_use_proj
__lowerCAmelCase = summary_activation
__lowerCAmelCase = summary_proj_to_labels
__lowerCAmelCase = summary_first_dropout
__lowerCAmelCase = start_n_top
__lowerCAmelCase = end_n_top
__lowerCAmelCase = mask_token_id
__lowerCAmelCase = lang_id
if "n_words" in kwargs:
__lowerCAmelCase = kwargs["n_words"]
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , **__lowerCamelCase )
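# ONNX export configuration: declares the dynamic axes for input_ids, attention_mask and
# token_type_ids (adding a "choice" axis for multiple-choice tasks).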
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@property
def snake_case ( self ):
if self.task == "multiple-choice":
__lowerCAmelCase = {0: "batch", 1: "choice", 2: "sequence"}
else:
__lowerCAmelCase = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 57 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
_A : Optional[int] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(__lowerCamelCase)
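    # Each test below builds PyTorchBenchmarkArguments for a tiny checkpoint, runs the benchmark,
    # and asserts that the inference/training time and memory result dicts are populated.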
def _lowerCamelCase ( self) -> int:
_A : Optional[int] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase)
_A : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Dict:
_A : int = "sgugger/tiny-distilbert-classification"
_A : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Tuple = "sshleifer/tiny-gpt2"
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , torchscript=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase)
_A : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision")
def _lowerCamelCase ( self) -> int:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , fpaa=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Any = PyTorchBenchmark(__lowerCamelCase)
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> Any:
_A : Union[str, Any] = "sshleifer/tiny-gpt2"
_A : Any = AutoConfig.from_pretrained(__lowerCamelCase)
# set architectures equal to `None`
_A : Dict = None
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Union[str, Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Any = "sshleifer/tiny-gpt2"
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : List[Any] = PyTorchBenchmark(__lowerCamelCase)
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> str:
_A : List[str] = "sshleifer/tiny-gpt2"
_A : Union[str, Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Optional[Any] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> int:
_A : Tuple = "sshleifer/tinier_bart"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Dict = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def _lowerCamelCase ( self) -> str:
_A : List[Any] = "sshleifer/tiny-gpt2"
_A : Optional[Any] = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : List[str] = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> int:
_A : int = "sshleifer/tinier_bart"
_A : str = AutoConfig.from_pretrained(__lowerCamelCase)
_A : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase , configs=[config])
_A : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def _lowerCamelCase ( self) -> Dict:
_A : List[str] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv") , train_memory_csv_file=os.path.join(__lowerCamelCase , "train_mem.csv") , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv") , train_time_csv_file=os.path.join(__lowerCamelCase , "train_time.csv") , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv") , multi_process=__lowerCamelCase , )
_A : Tuple = PyTorchBenchmark(__lowerCamelCase)
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_time.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "train_mem.csv")).exists())
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv")).exists())
def _lowerCamelCase ( self) -> int:
_A : Dict = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase):
self.assertTrue(hasattr(__lowerCamelCase , "sequential"))
self.assertTrue(hasattr(__lowerCamelCase , "cumulative"))
self.assertTrue(hasattr(__lowerCamelCase , "current"))
self.assertTrue(hasattr(__lowerCamelCase , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
_A : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt") , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , multi_process=__lowerCamelCase , )
_A : Optional[int] = PyTorchBenchmark(__lowerCamelCase)
_A : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt")).exists())
| 11 | 0 |
"""simple docstring"""
import math
import unittest
def _lowercase ( __lowerCAmelCase ) -> Union[str, Any]:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def _a ( self ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(__lowerCamelCase ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 132 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowerCAmelCase__ = None
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
lowerCAmelCase__ = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class lowerCAmelCase__ ( a):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
__SCREAMING_SNAKE_CASE = NllbTokenizer
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=False , **__lowerCamelCase , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
_A : Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase) if isinstance(__lowerCamelCase , __lowerCamelCase) else mask_token
_A : Optional[int] = legacy_behaviour
super().__init__(
vocab_file=__lowerCamelCase , tokenizer_file=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , unk_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , legacy_behaviour=__lowerCamelCase , **__lowerCamelCase , )
_A : int = vocab_file
_A : Optional[Any] = False if not self.vocab_file else True
_A : Tuple = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
_A : Union[str, Any] = {
lang_code: self.convert_tokens_to_ids(__lowerCamelCase) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_A : Optional[int] = src_lang if src_lang is not None else "eng_Latn"
_A : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang)
_A : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def _lowerCamelCase ( self) -> str:
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> List[int]:
_A : Tuple = [self.sep_token_id]
_A : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
_A : List[Any] = src_lang
_A : Optional[int] = self(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase)
_A : Tuple = self.convert_tokens_to_ids(__lowerCamelCase)
_A : Tuple = tgt_lang_id
return inputs
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = "eng_Latn" , __lowerCamelCase = None , __lowerCamelCase = "fra_Latn" , **__lowerCamelCase , ) -> BatchEncoding:
_A : Tuple = src_lang
_A : int = tgt_lang
return super().prepare_seqaseq_batch(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase)
def _lowerCamelCase ( self) -> str:
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang)
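    # Reset prefix/suffix special tokens for the source language: in legacy mode the language
    # code is appended after </s> as a suffix, otherwise it is prepended as a prefix token.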
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Dict = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : List[str] = []
_A : Dict = [self.eos_token_id, self.cur_lang_code]
else:
_A : Tuple = [self.cur_lang_code]
_A : Optional[Any] = [self.eos_token_id]
_A : Optional[int] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : int = self.convert_ids_to_tokens(self.suffix_tokens)
_A : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase) -> None:
_A : Optional[Any] = self.convert_tokens_to_ids(__lowerCamelCase)
if self.legacy_behaviour:
_A : Tuple = []
_A : Any = [self.eos_token_id, self.cur_lang_code]
else:
_A : Union[str, Any] = [self.cur_lang_code]
_A : str = [self.eos_token_id]
_A : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens)
_A : Dict = self.convert_ids_to_tokens(self.suffix_tokens)
_A : Union[str, Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer.")
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory.")
return
_A : Dict = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__lowerCamelCase):
copyfile(self.vocab_file , __lowerCamelCase)
return (out_vocab_file,)
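
# A minimal usage sketch (hedged): this assumes the class above is the NLLB-style fast
# tokenizer from `transformers` and that the Hub checkpoint named below is available;
# the exact checkpoint name is an assumption, not part of this file.
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   batch = tok("Hello world", return_tensors="pt")   # wrapped with the eng_Latn / </s> special tokens
#   inputs = tok._build_translation_inputs(
#       "Hello world", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )                                                  # also sets forced_bos_token_id for generation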
| 11 | 0 |
def find_min(arr):
    """Partition `arr` into two subsets whose sums are as close as possible and
    return the minimum possible difference between the two subset sums."""
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True if some subset of the first i elements sums to exactly j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    # The largest achievable subset sum j <= s/2 gives the minimum difference s - 2*j
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
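
# A minimal usage sketch (hedged): [1, 6, 11, 5] splits best into {1, 5, 6} and {11},
# giving subset sums 12 and 11, so the smallest achievable difference is 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # expected output: 1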
| 156 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a format type name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an unavailable format type (or one of its aliases) is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory that instantiates the registered Formatter for a format type name or alias."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
| 11 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # Binarize the logits: non-positive -> background (0), positive -> mask (1)
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
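
# A minimal usage sketch (hedged, kept as comments): assumes the transformers tool runtime
# can download the CIDAS/clipseg-rd64-refined checkpoint and that "cat.png" exists locally;
# the file name is purely illustrative.
#
#   from PIL import Image
#   tool = ImageSegmentationTool()
#   mask = tool(image=Image.open("cat.png"), label="cat")
#   mask.save("cat_mask.png")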
| 177 |
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # interior points are weighted by h
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
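
# A minimal sanity check (hedged, commented out so module behavior is unchanged): the exact
# integral of x**2 over [0, 1] is 1/3, so with 10 steps the rule above should print a value
# close to it (roughly 0.335).
#
#   assert abs(method_1([0.0, 1.0], 10.0) - 1.0 / 3.0) < 5e-3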
| 11 | 0 |