| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 81-54k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str):
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
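# --- Illustrative usage (not part of the original file): a minimal sketch that
# assumes the helper above is importable as `get_activation`.
import torch

activation = get_activation("silu")
print(activation(torch.tensor([-1.0, 0.0, 1.0])))  # SiLU applied elementwise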
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
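# --- Illustrative usage (not part of the original file): a hedged sketch that
# requires network access to the Hugging Face Hub; the checkpoint name is taken
# from the PRETRAINED_VOCAB_FILES_MAP above.
# tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
# ids = tokenizer("hello world").input_ids
# print(tokenizer.decode(ids))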
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
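# --- Illustrative invocation (not part of the original file; the script
# filename and paths are placeholders for a locally saved copy and a locally
# downloaded fairseq checkpoint):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base-converted \
#       --not_finetuned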
'''simple docstring'''
def solution():
    """Return the product of the digits d1 * d10 * d100 * ... * d1000000 of
    Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
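# Worked check (follows directly from the construction above): the
# concatenation "123456789101112..." makes constant[0] == "1" and
# constant[9] == "1" (the 10th digit, which falls inside "10"), so the first
# two factors of the product are both 1.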
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
'''simple docstring'''
import os
def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
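# --- Illustrative subclass (hypothetical, not part of the library): `read()`
# is the only abstract method, so a toy reader can return any materialized
# Dataset.
class ToyDatasetReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        return Dataset.from_dict({"text": ["hello", "world"]})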
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase = DDIMScheduler()
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase = CLIPTextModel(_lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> int:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = """french fries"""
lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _a ( self ) -> str:
'''simple docstring'''
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = 0
def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase = False
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase )
lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
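# --- Illustrative usage of the pipeline under test (not part of the original
# file; mirrors the slow tests above and requires a GPU plus the
# stabilityai/stable-diffusion-2-base weights):
# pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-2-base", safety_checker=None
# )
# image = pipe("a photo of the dolomites").images[0]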
'''simple docstring'''
def apply_table(inp, table):
    """Apply a permutation table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit block in S-box `s` (row = outer bits, col = middle bits)."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, sa, sb, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(sa, temp[:4])  # noqa: E741
    r = apply_sbox(sb, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
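# Round-trip property (descriptive note, not part of the original file): the
# Feistel structure above is its own inverse, so applying `function` with key2
# and then key1 to the ciphertext, as the decryption block does, reproduces
# the original 8-bit plaintext for any valid 10-bit key.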
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase_ : Tuple = logging.getLogger(__name__)
@dataclass
class ModelArguments:
__A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether tp freeze the encoder.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class DataTrainingArguments:
__A = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
__A = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
__A = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
check_output_dir(lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowercase_ , lowercase_ , lowercase_ ):
assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) )
lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowercase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowercase_ , lowercase_ ):
lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowercase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase = SeqaSeqDataset
# Get datasets
lowercase = (
dataset_class(
lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase = (
build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None
)
lowercase = SeqaSeqTrainer(
model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator(
lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
lowercase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase = train_result.metrics
lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase = trainer.evaluate(metric_key_prefix="""val""" )
lowercase = data_args.n_val
lowercase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" )
lowercase = test_output.metrics
lowercase = data_args.n_test
if trainer.is_world_process_zero():
lowercase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.predict_with_generate:
lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
lowercase = lmap(str.strip , lowercase_ )
write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
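# --- Illustrative invocation (not part of the original file; the script
# filename, model name, and paths are placeholders):
#   python finetune_trainer.py --model_name_or_path t5-small \
#       --data_dir ./data --output_dir ./out \
#       --do_train --do_eval --predict_with_generate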
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer) -> None:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
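# --- Illustrative usage (not part of the original file; the checkpoint name
# is an assumption):
# from transformers import BlipProcessor
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=image, text="a photography of", return_tensors="pt")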
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str:
'''simple docstring'''
lowercase = {}
lowercase = {}
if prompt is not None:
lowercase = prompt
if generate_kwargs is not None:
lowercase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
lowercase = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase = self.model.config.model_type
if model_type == "git":
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
lowercase = [self.tokenizer.cls_token_id] + input_ids
lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase = None
return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        '''simple docstring'''
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""] , list )
            and all(x is None for x in model_inputs["""input_ids"""] )
        ):
            model_inputs["""input_ids"""] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        records = []
        for output_ids in model_outputs:
            record = {
                """generated_text""": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
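# --- Illustrative usage of this pipeline (a sketch, not part of the original file).
# --- The checkpoint name and image URL below are assumptions chosen for demonstration:
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     outputs = captioner(
#         "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
#         max_new_tokens=20,
#     )
#     print(outputs)  # e.g. [{"generated_text": "two parrots standing on a branch"}]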
| 653 | 0 |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
    return img.point(brightness )
if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
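# Quick sanity check of the brightness formula (a sketch, not from the original file):
# for level=100 a mid-gray pixel c=128 maps to 128 + 100 + (128 - 128) = 228.
_test_img = Image.new("L", (2, 2), color=128)  # tiny 2x2 mid-gray test image
assert change_brightness(_test_img, 100).getpixel((0, 0)) == 228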
| 702 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class NezhaConfig(PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = '''nezha'''
    def __init__( self , vocab_size=2_1128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
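# Minimal usage sketch (not in the original file): like any PretrainedConfig subclass,
# the config can be instantiated with defaults or with per-field overrides.
#
#     config = NezhaConfig(hidden_size=512, num_hidden_layers=6)
#     print(config.max_relative_position)  # 64, the NEZHA-specific relative-position window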
| 653 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict
@require_flax
class __UpperCamelCase (_UpperCAmelCase , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxRoFormerModelTester(self )
@slow
    def test_model_from_pretrained( self ):
'''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class __UpperCamelCase (unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
'''simple docstring'''
        model = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        vocab_size = 5_0000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 703 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowercase_ : Tuple = logging.getLogger(__name__)
def git_log(folder_path: str ):
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , """git_log.json""" ) , """w""" ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params(params ):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info("""Initializing GPUs""" )
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ["""WORLD_SIZE"""] )
        params.n_gpu_per_node = int(os.environ["""N_GPU_NODE"""] )
        params.global_rank = int(os.environ["""RANK"""] )
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ["""N_NODES"""] )
        assert params.node_id == int(os.environ["""NODE_RANK"""] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
# summary
    PREFIX = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def set_seed(args ):
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed )
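# Usage sketch (not in the original file): the distillation scripts build `params` with
# argparse; the single-process CPU path shown here needs only these attributes.
if __name__ == "__main__":
    from argparse import Namespace
    params = Namespace(n_gpu=0, local_rank=-1, seed=42)
    init_gpu_params(params)  # CPU path: sets local_rank=0, is_master=True, multi_gpu=False
    set_seed(params)         # seeds numpy and torch (and all CUDA devices when n_gpu > 0)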
| 653 | 0 |
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution(num_picked: int = 20 ) -> str:
    total = math.comb(NUM_BALLS , num_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , num_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return F"""{result:.9f}"""
if __name__ == "__main__":
    print(solution(20))
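# Exact cross-check of the expectation above (a sketch, not in the original file):
# E[#colours] = NUM_COLOURS * (1 - C(60, 20) / C(70, 20)), computed with exact fractions.
if __name__ == "__main__":
    from fractions import Fraction
    exact = NUM_COLOURS * (1 - Fraction(math.comb(NUM_BALLS - BALLS_PER_COLOUR, 20), math.comb(NUM_BALLS, 20)))
    assert abs(float(exact) - float(solution(20))) < 1e-9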
| 704 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info(auth_token: str ) -> dict[Any, Any]:
    headers = {
        """Authorization""": F"""token {auth_token}""",
        """Accept""": """application/vnd.github.v3+json""",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 653 | 0 |
def merge_sort(collection: list ) -> list:
    def merge(left: list , right: list ) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection ) <= 1:
        return collection
    mid = len(collection ) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ : Tuple = input('''Enter numbers separated by a comma:\n''').strip()
lowercase_ : Union[str, Any] = [int(item) for item in user_input.split(''',''')]
print(*merge_sort(unsorted), sep=''',''')
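# Small worked example (a sketch, not in the original file): the sort is stable and runs
# in O(n log n); the `<=` in `_merge` prefers the left run, preserving the order of ties.
assert merge_sort([5, 2, 9, 1, 2]) == [1, 2, 2, 5, 9]
assert merge_sort([]) == []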
| 705 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def SCREAMING_SNAKE_CASE ( lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : int = 1_6000 ):
lowercase = int(round(sample_rate * max_length ) )
if len(lowercase_ ) <= sample_length:
return wav
lowercase = randint(0 , len(lowercase_ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(default=None , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    train_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
    eval_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
    train_split_name: str = field(
        default='''train''' , metadata={
            '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
        } , )
    eval_split_name: str = field(
        default='''validation''' , metadata={
            '''help''': (
                '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
    audio_column_name: str = field(
        default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
    label_column_name: str = field(
        default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    max_length_seconds: float = field(
        default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    feature_extractor_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    freeze_feature_encoder: bool = field(
        default=True , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
    attention_mask: bool = field(
        default=True , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    freeze_feature_extractor: Optional[bool] = field(
        default=None , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
    def __post_init__( self ):
        '''simple docstring'''
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder` """
                """instead. Setting `freeze_feature_encoder==True`.""" , FutureWarning , )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`. """
                """Only make use of `--freeze_feature_encoder`.""" )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_audio_classification""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["""train"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
    raw_datasets["""eval"""] = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch ):
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
            subsampled_wavs.append(wav )
        inputs = feature_extractor(subsampled_wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch
    def val_transforms(batch ):
        wavs = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs , sampling_rate=feature_extractor.sampling_rate )
        output_batch = {model_input_name: inputs.get(model_input_name )}
        output_batch["""labels"""] = list(batch[data_args.label_column_name] )
        return output_batch
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["""train"""].features[data_args.label_column_name].names
    label2id , id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("""accuracy""" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred ):
        predictions = np.argmax(eval_pred.predictions , axis=1 )
        return metric.compute(predictions=predictions , references=eval_pred.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["""train"""] = (
                raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms , output_all_columns=False )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["""eval"""] = (
                raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms , output_all_columns=False )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=feature_extractor , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """audio-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""audio-classification"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
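# Example invocation (a sketch, not part of the original script). The dataset and
# backbone below are assumptions -- any audio-classification dataset/model pair works:
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting --overwrite_output_dir \
#       --do_train --do_eval --freeze_feature_encoder --max_length_seconds 1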
| 653 | 0 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
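# Minimal usage sketch (not in the original file). "microsoft/vq-diffusion-ithq" is the
# checkpoint this pipeline was published with; treat the exact id as an assumption.
#
#     import torch
#     from diffusers import VQDiffusionPipeline
#
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq", torch_dtype=torch.float16).to("cuda")
#     image = pipe("teddy bear playing in the pool").images[0]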
| 706 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments ):
    deprecated_args = [
        '''no_inference''',
        '''no_cuda''',
        '''no_tpu''',
        '''no_speed''',
        '''no_memory''',
        '''no_env_print''',
        '''no_multi_process''',
    ]
    def __init__( self , **kwargs ):
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    F"""{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"""
                    F""" {positive_arg}={kwargs[positive_arg]}""" )
        self.tpu_name = kwargs.pop("""tpu_name""" , self.tpu_name )
        self.device_idx = kwargs.pop("""device_idx""" , self.device_idx )
        self.eager_mode = kwargs.pop("""eager_mode""" , self.eager_mode )
        self.use_xla = kwargs.pop("""use_xla""" , self.use_xla )
        super().__init__(**kwargs )
    tpu_name: str = field(
        default=None , metadata={'''help''': '''Name of TPU'''} , )
    device_idx: int = field(
        default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
    eager_mode: bool = field(default=False , metadata={'''help''': '''Benchmark models in eager model.'''} )
    use_xla: bool = field(
        default=False , metadata={
            '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
        } , )
@cached_property
    def _setup_tpu( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
@cached_property
    def _setup_strategy( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
                strategy = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
            else:
                tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
        return strategy
    @property
    def is_tpu( self ) -> bool:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        return self._setup_tpu is not None
    @property
    def strategy( self ) -> "tf.distribute.Strategy":
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        return self._setup_strategy
    @property
    def gpu_list( self ) -> Tuple:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        return tf.config.list_physical_devices("""GPU""" )
    @property
    def n_gpu( self ) -> int:
        '''simple docstring'''
        requires_backends(self , ["""tf"""] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    @property
    def is_gpu( self ) -> bool:
        '''simple docstring'''
        return self.n_gpu > 0
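# Standalone sketch of the CPU fallback branch in `_setup_strategy` above (not in the
# original file): hide the GPUs, pin a one-device strategy, and run an op under it.
if __name__ == "__main__":
    tf.config.set_visible_devices([], "GPU")  # disable GPU, as the benchmark args do
    strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
    with strategy.scope():
        doubled = tf.constant([1.0, 2.0]) * 2
    print(doubled.numpy())  # [2. 4.]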
| 653 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__( self , parent , batch_size=13 , image_size=10 , num_channels=3 , patch_size=2 , tubelet_size=2 , num_frames=2 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , mask_ratio=0.9 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame
        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length )
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return VideoMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = VideoMAEModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = VideoMAEForPreTraining(config )
        model.to(torch_device )
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,) )
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
        bool_masked_pos = mask.expand(self.batch_size , -1 ).bool()
        result = model(pixel_values , bool_masked_pos )
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = VideoMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=VideoMAEConfig , has_text_modality=False , hidden_size=37 )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,) )
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
            mask = mask.expand(self.model_tester.batch_size , -1 ).bool()
            inputs_dict["""bool_masked_pos"""] = mask.to(torch_device )
        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING ),
            ]:
                inputs_dict["""labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""VideoMAE does not use inputs_embeds""" )
def _a ( self ) -> str:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
@slow
def _a ( self ) -> int:
'''simple docstring'''
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def _a ( self ) -> Optional[int]:
'''simple docstring'''
if not self.has_attentions:
pass
else:
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = True
for model_class in self.all_model_classes:
lowercase = self.model_tester.seq_length - self.model_tester.num_masks
lowercase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase = True
lowercase = False
lowercase = True
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase = True
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase = len(_lowerCAmelCase )
# Check attention is always last and order is fine
lowercase = True
lowercase = True
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
self.assertEqual(out_len + 1 , len(_lowerCAmelCase ) )
lowercase = outputs.attentions
self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _a ( self ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
lowercase = self.model_tester.seq_length - self.model_tester.num_masks
lowercase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> List[Any]:
'''simple docstring'''
pass
def prepare_video():
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> str:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
    def test_inference_for_video_classification( self ):
        '''simple docstring'''
        model = VideoMAEForVideoClassification.from_pretrained("""MCG-NJU/videomae-base-finetuned-kinetics""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 400) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_for_pretraining( self ):
        '''simple docstring'''
        model = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" ).to(torch_device )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video , return_tensors="""pt""" ).to(torch_device )
        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
        inputs["""bool_masked_pos"""] = torch.load(local_path )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536] )
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=torch_device )
        self.assertEqual(outputs.logits.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice , atol=1E-4 ) )
        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1E-4 ) )
        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("""MCG-NJU/videomae-base-short""" , norm_pix_loss=False ).to(
            torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_loss = torch.tensor([0.6469] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.loss , expected_loss , atol=1E-4 ) )
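# Worked example of the tester's masking bookkeeping (a sketch, not in the original
# file), using its defaults image_size=10, patch_size=2, num_frames=2, tubelet_size=2,
# mask_ratio=0.9:
if __name__ == "__main__":
    num_patches_per_frame = (10 // 2) ** 2         # 25 patches per frame
    seq_length = (2 // 2) * num_patches_per_frame  # 25 tokens after tubelet grouping
    num_masks = int(0.9 * seq_length)              # 22 of the 25 tokens get masked
    print(num_patches_per_frame, seq_length, num_masks)  # 25 25 22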
| 707 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : str = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig ):
    model_type = '''vit_msn'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 653 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : str = logging.get_logger(__name__)
lowercase_ : Optional[int] = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class RoCBertConfig(PretrainedConfig ):
    model_type = '''roc_bert'''
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=768 , pronunciation_vocab_size=910 , shape_embed_dim=512 , shape_vocab_size=2_4858 , concat_input=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
| 708 |
'''simple docstring'''
def apply_table(inp , table ):
    res = """"""
    for i in table:
        res += inp[i - 1]
    return res
def left_shift(data ):
    return data[1:] + data[0]
def xor(a , b ):
    res = """"""
    for i in range(len(a ) ):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s , data ):
    row = int("""0b""" + data[0] + data[-1] , 2 )
    col = int("""0b""" + data[1:3] , 2 )
    return bin(s[row][col] )[2:]
def function(expansion , s0 , s1 , key , message ):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right , expansion )
    temp = xor(temp , key )
    l = apply_sbox(s0 , temp[:4] )  # noqa: E741
    r = apply_sbox(s1 , temp[4:] )
    l = "0" * (2 - len(l )) + l  # noqa: E741
    r = "0" * (2 - len(r )) + r
    temp = apply_table(l + r , p4_table )
    temp = xor(left , temp )
    return temp + right
if __name__ == "__main__":
    key = input('''Enter 10 bit key: ''')
    message = input('''Enter 8 bit message: ''')
    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)
    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('''Cipher text is:''', CT)
    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
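# Micro-examples of the helpers above (a sketch, not in the original file):
# `apply_table` permutes using 1-indexed positions, `left_shift` rotates one place,
# and `xor` works bitwise on bit strings.
assert apply_table("10100000", [2, 6, 3, 1, 4, 8, 5, 7]) == "00110000"
assert left_shift("10100") == "01001"
assert xor("1010", "0110") == "1100"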
| 653 | 0 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
lowercase_ : Optional[Any] = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            """The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use VideoMAEImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 709 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def map(dataset: datasets.Dataset , **kwargs ):
    _ = dataset.map(**kwargs )
@get_duration
def filter(dataset: datasets.Dataset , **kwargs ):
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , """dataset.arrow""" ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples["""text"""] )
        times["""map identity"""] = map(dataset )
        times["""map identity batched"""] = map(dataset , batched=True )
        times["""map no-op batched"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""numpy""" ):
            times["""map no-op batched numpy"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""pandas""" ):
            times["""map no-op batched pandas"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            times["""map no-op batched pytorch"""] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            times["""map no-op batched tensorflow"""] = map(dataset , function=lambda x : None , batched=True )
        times["""map fast-tokenizer batched"""] = map(dataset , function=tokenize , batched=True )
        times["""filter"""] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times["map fast-tokenizer batched numpy"] = map(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , """wb""" ) as f:
        f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 653 | 0 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token , num_runs=7 ):
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
    # The id of a workflow (not of a workflow run)
    workflow_id = """636036"""
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
    result = requests.get(url , headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token ):
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["""id"""]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names , output_dir , token ):
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )
def get_last_daily_ci_reports(artifact_names , output_dir , token ):
    get_last_daily_ci_artifacts(artifact_names , output_dir , token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir , F"""{artifact_name}.zip""" )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode("""UTF-8""" )
    return results
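# Usage sketch (not in the original file): fetch the latest scheduled-CI reports. The
# artifact name is an assumption -- pass whichever artifact names your workflow uploads.
if __name__ == "__main__":
    token = os.environ.get("GITHUB_TOKEN")
    reports = get_last_daily_ci_reports(["run_models_gpu"], output_dir="ci_reports", token=token)
    print(sorted(reports))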
| 710 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( vectors , noofclusters ):
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("""float64""" , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("""int32""" )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("""float""" , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("""float""" , [dim] )
        vb = tf.placeholder("""float""" , [dim] )
        # tf.sub was renamed tf.subtract in TensorFlow 1.0
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va , vb ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("""float""" , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()  # replaces the long-deprecated initialize_all_variables
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
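# A compact NumPy sketch of the same Expectation-Maximization loop. The TF1
# graph API above (tf.Session/tf.placeholder) does not run under TensorFlow
# 2.x, so this is an illustrative equivalent, not the original implementation.
import numpy as np

def kmeans_numpy(vectors , noofclusters , noofiterations=100 ):
    vectors = np.asarray(vectors , dtype=float )
    rng = np.random.default_rng(0 )
    # pick distinct random data points as the initial centroids
    centroids = vectors[rng.choice(len(vectors ) , noofclusters , replace=False )].copy()
    assignments = np.zeros(len(vectors ) , dtype=int )
    for _ in range(noofiterations ):
        # EXPECTATION: assign each vector to the nearest centroid
        distances = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :] , axis=2 )
        assignments = distances.argmin(axis=1 )
        # MAXIMIZATION: move each centroid to the mean of its members
        for k in range(noofclusters ):
            members = vectors[assignments == k]
            if len(members ):
                centroids[k] = members.mean(axis=0 )
    return centroids, assignments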
| 653 | 0 |
'''simple docstring'''
import socket
def main ( ):
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 1_2312
    sock.connect((host, port) )
    sock.send(b"""Hello server!""" )
    with open("""Received_file""" , """wb""" ) as out_file:
        print("""File opened""" )
        print("""Receiving data...""" )
        while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
    print("""Successfully received the file""" )
    sock.close()
    print("""Connection closed""" )
if __name__ == "__main__":
main()
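# A matching minimal server sketch for the client above (the served file name
# is a hypothetical example; start the server before running the client).
def serve_file(filename="""File_to_send""" , port=1_2312 ):
    server = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    server.bind((socket.gethostname(), port) )
    server.listen(1 )
    conn, addr = server.accept()
    print(F"""Connected to {addr}""" )
    print(conn.recv(1024 ) )  # the client's greeting
    with open(filename , """rb""" ) as in_file:
        while chunk := in_file.read(1024 ):
            conn.send(chunk )
    conn.close()
    server.close()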
| 711 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( rows : int , cols : int , mat : list[list[int]] ):
    def update_area_of_max_square(row : int , col : int ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( rows : int , cols : int , mat : list[list[int]] ):
    def update_area_of_max_square_using_dp_array(
        row : int , col : int , dp_array : list[list[int]] ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up ( rows : int , cols : int , mat : list[list[int]] ):
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def SCREAMING_SNAKE_CASE ( rows : int , cols : int , mat : list[list[int]] ):
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        # hand the finished row to next_row and start a fresh current_row
        next_row = current_row
        current_row = [0] * (cols + 1)
    return largest_square_area
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
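# An illustrative check (not from the original): in this 3x4 matrix the
# largest all-ones square has side 2, which the bottom-up function reports.
def _example_check():
    example = [
        [1, 0, 1, 1],
        [1, 1, 1, 1],
        [0, 1, 1, 1],
    ]
    return largest_square_area_in_matrix_bottom_up(3 , 4 , example )  # -> 2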
| 653 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def get_mobilevit_config ( mobilevit_name : str ):
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
    config.hidden_dropout_prob = 0.05
    config.expand_ratio = 2.0
    if mobilevit_name.startswith("""deeplabv3_""" ):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = """pascal-voc-id2label.json"""
    else:
        config.num_labels = 1000
        filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key ( name , base_model=False ):
    for i in range(1 , 6 ):
        if F"""layer_{i}.""" in name:
            name = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
    if "conv_1." in name:
        name = name.replace("""conv_1.""" , """conv_stem.""" )
    if ".block." in name:
        name = name.replace(""".block.""" , """.""" )
    if "exp_1x1" in name:
        name = name.replace("""exp_1x1""" , """expand_1x1""" )
    if "red_1x1" in name:
        name = name.replace("""red_1x1""" , """reduce_1x1""" )
    if ".local_rep.conv_3x3." in name:
        name = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
    if ".local_rep.conv_1x1." in name:
        name = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
    if ".norm." in name:
        name = name.replace(""".norm.""" , """.normalization.""" )
    if ".conv." in name:
        name = name.replace(""".conv.""" , """.convolution.""" )
    if ".conv_proj." in name:
        name = name.replace(""".conv_proj.""" , """.conv_projection.""" )
    for i in range(0 , 2 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
    for i in range(2 , 6 ):
        for j in range(0 , 4 ):
            if F""".{i}.{j}.""" in name:
                name = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
    if "expand_1x1" in name:
        name = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
    if "conv_3x3" in name:
        name = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
    if "reduce_1x1" in name:
        name = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
    for i in range(2 , 5 ):
        if F""".global_rep.{i}.weight""" in name:
            name = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
        if F""".global_rep.{i}.bias""" in name:
            name = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
    if ".global_rep." in name:
        name = name.replace(""".global_rep.""" , """.transformer.""" )
    if ".pre_norm_mha.0." in name:
        name = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
    if ".pre_norm_ffn.0." in name:
        name = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
    if ".pre_norm_ffn.1." in name:
        name = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
    if ".pre_norm_ffn.4." in name:
        name = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
    if ".transformer." in name:
        name = name.replace(""".transformer.""" , """.transformer.layer.""" )
    if ".aspp_layer." in name:
        name = name.replace(""".aspp_layer.""" , """.""" )
    if ".aspp_pool." in name:
        name = name.replace(""".aspp_pool.""" , """.""" )
    if "seg_head." in name:
        name = name.replace("""seg_head.""" , """segmentation_head.""" )
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
    if "classifier.fc." in name:
        name = name.replace("""classifier.fc.""" , """classifier.""" )
    elif (not base_model) and ("segmentation_head." not in name):
        name = """mobilevit.""" + name
    return name
def convert_state_dict ( orig_state_dict , model , base_model=False ):
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevit."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[0][6:] ) - 1
            transformer_num = int(key_split[3] )
            layer = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
            )
            if "weight" in key:
                orig_state_dict[prefix + """query.weight"""] = val[:dim, :]
                orig_state_dict[prefix + """key.weight"""] = val[dim : dim * 2, :]
                orig_state_dict[prefix + """value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[prefix + """query.bias"""] = val[:dim]
                orig_state_dict[prefix + """key.bias"""] = val[dim : dim * 2]
                orig_state_dict[prefix + """value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key , base_model )] = val
    return orig_state_dict
def prepare_img ( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint ( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
    config = get_mobilevit_config(mobilevit_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )
    # load 🤗 model
    if mobilevit_name.startswith("""deeplabv3_""" ):
        model = MobileViTForSemanticSegmentation(config ).eval()
    else:
        model = MobileViTForImageClassification(config ).eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3, :3, :3] , expected_logits , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
        model_mapping = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name , organization="""apple""" )
        model.push_to_hub(model_name , organization="""apple""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
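# A usage sketch for a converted checkpoint (the local path and image file are
# hypothetical examples; it assumes the script was run with
# --pytorch_dump_folder_path ./mobilevit-small).
def _example_inference(folder="""./mobilevit-small""" , image_path="""cats.jpg""" ):
    processor = MobileViTImageProcessor.from_pretrained(folder )
    model = MobileViTForImageClassification.from_pretrained(folder )
    inputs = processor(images=Image.open(image_path ) , return_tensors="""pt""" )
    logits = model(**inputs ).logits
    # map the top class index back to its human-readable label
    return model.config.id2label[logits.argmax(-1 ).item()]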
| 712 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
    model_type = '''gpt_bigcode'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=5_0257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
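# A small usage sketch of the attribute_map aliasing above (illustrative only;
# the class corresponds to transformers' GPTBigCodeConfig).
def _example_attribute_map():
    config = __UpperCamelCase(n_embd=768 , n_layer=12 , n_head=12 )
    # the generic names resolve to the GPT-style fields via attribute_map
    return config.hidden_size, config.num_hidden_layers, config.num_attention_heads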
| 653 | 0 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class __UpperCamelCase (_UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase=100 , _lowerCAmelCase=None , _lowerCAmelCase = None , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase = [F"""<extra_id_{i}>""" for i in range(_lowerCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowercase = len(set(filter(lambda _lowerCAmelCase : bool("""extra_id""" in str(_lowerCAmelCase ) ) , _lowerCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
F"""You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"""
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
lowercase = legacy
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , extra_ids=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_lowerCAmelCase , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = extra_ids
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@staticmethod
def _a ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowercase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F""" {pretrained_model_name_or_path} automatically truncating your input to"""
F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , _lowerCAmelCase , )
return max_model_length
@property
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + [1]
return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1]
def _a ( self ) -> List[Any]:
'''simple docstring'''
return list(
set(filter(lambda _lowerCAmelCase : bool(re.search(r"""<extra_id_\d+>""" , _lowerCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def _a ( self ) -> List[str]:
'''simple docstring'''
return [self._convert_token_to_id(_lowerCAmelCase ) for token in self.get_sentinel_tokens()]
def _a ( self , _lowerCAmelCase ) -> List[int]:
'''simple docstring'''
if len(_lowerCAmelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"""
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase = self._add_eos_if_not_present(_lowerCAmelCase )
if token_ids_a is None:
return token_ids_a
else:
lowercase = self._add_eos_if_not_present(_lowerCAmelCase )
return token_ids_a + token_ids_a
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _lowerCAmelCase , **_lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if not self.legacy:
lowercase = SPIECE_UNDERLINE + text.replace(_lowerCAmelCase , """ """ )
return super().tokenize(_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
if not self.legacy:
lowercase = text.startswith(_lowerCAmelCase )
if is_first:
lowercase = text[1:]
lowercase = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(_lowerCAmelCase ):
lowercase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def _a ( self , _lowerCAmelCase ) -> int:
'''simple docstring'''
if token.startswith("""<extra_id_""" ):
lowercase = re.match(r"""<extra_id_(\d+)>""" , _lowerCAmelCase )
lowercase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
lowercase = self.sp_model.IdToPiece(_lowerCAmelCase )
else:
lowercase = F"""<extra_id_{self.vocab_size - 1 - index}>"""
return token
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = []
lowercase = """"""
lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowercase = True
lowercase = []
else:
current_sub_tokens.append(_lowerCAmelCase )
lowercase = False
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
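# A brief usage sketch of the sentinel-token scheme implemented above, using
# the released T5Tokenizer from transformers (illustrative; it downloads the
# "t5-small" sentencepiece model and requires the sentencepiece package).
def _example_sentinel_usage():
    from transformers import T5Tokenizer
    tok = T5Tokenizer.from_pretrained("""t5-small""" )
    ids = tok("""The <extra_id_0> walks in <extra_id_1> park""" ).input_ids
    # sentinel tokens map to the top of the vocabulary and </s> is appended
    return tok.convert_ids_to_tokens(ids )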
| 713 |
'''simple docstring'''
import requests
def send_slack_message ( message_body : str , slack_url : str ):
    headers = {"""Content-Type""": """application/json"""}
    response = requests.post(slack_url , json={"""text""": message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            """Request to slack returned an error """
            F"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653 | 0 |
'''simple docstring'''
def combination_sum_iv ( n : int , array : list[int] , target : int ):
    def count_of_possible_combinations(target : int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def SCREAMING_SNAKE_CASE ( n : int , array : list[int] , target : int ):
    def count_of_possible_combinations_with_dp_array(
        target : int , dp_array : list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def SCREAMING_SNAKE_CASE ( n : int , array : list[int] , target : int ):
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))  # -> 9
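# A memoized sketch of the same recurrence using functools.lru_cache
# (an illustrative alternative to the explicit dp_array versions above).
from functools import lru_cache

def combination_sum_iv_cached(array : list[int] , target : int ) -> int:
    @lru_cache(maxsize=None )
    def go(remaining : int ) -> int:
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        return sum(go(remaining - item ) for item in array )
    return go(target )
# combination_sum_iv_cached([1, 2, 5], 5) -> 9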
| 714 |
| 653 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('''s3fs''') is not None
if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401
COMPRESSION_FILESYSTEMS : List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def SCREAMING_SNAKE_CASE ( dataset_path : str ):
    if "://" in dataset_path:
        dataset_path = dataset_path.split("""://""" )[1]
    return dataset_path
def is_remote_filesystem ( fs : fsspec.AbstractFileSystem ):
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def SCREAMING_SNAKE_CASE ( fs : fsspec.AbstractFileSystem , src : str , dst : str ):
    is_local = not is_remote_filesystem(fs )
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def SCREAMING_SNAKE_CASE ( ):
    if hasattr(fsspec.asyn , """reset_lock""" ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
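# A minimal illustrative check of is_remote_filesystem above: the "file"
# protocol is treated as local. (On newer fsspec versions LocalFileSystem.protocol
# may be a tuple rather than the string "file", in which case the check above
# would need adjusting.)
def _example_local_check():
    fs = fsspec.filesystem("""file""" )
    return is_remote_filesystem(fs ), is_remote_filesystem(None )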
| 715 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = num_labels
lowercase = image_size
lowercase = layer_depths
lowercase = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = SwiftFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
def _config_zero_init(_lowerCAmelCase ):
lowercase = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
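# A worked example of the hidden-state shapes asserted in the test above,
# using the tester defaults (image_size=224, embed_dims=[48, 56, 112, 220]);
# illustrative only.
def _expected_hidden_state_shapes(image_size=224 , embed_dims=(48, 56, 112, 220) ):
    shapes = []
    for i in range(2 * len(embed_dims ) ):  # two hidden states per stage
        side = (image_size // 4) // 2 ** (i // 2)
        shapes.append((embed_dims[i // 2], side, side) )
    return shapes  # [(48, 56, 56), (48, 56, 56), (56, 28, 28), ..., (220, 7, 7)]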
| 653 | 0 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowercase_ : Dict = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
lowercase_ : Tuple = get_tests_dir('''fixtures/vocab.json''')
lowercase_ : Optional[int] = get_tests_dir('''fixtures''')
class __UpperCamelCase (unittest.TestCase ):
__A = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = 0
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig()
lowercase = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
lowercase = AutoProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> str:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_lowerCAmelCase , os.path.join(_lowerCAmelCase , _lowerCAmelCase ) )
copyfile(_lowerCAmelCase , os.path.join(_lowerCAmelCase , """vocab.json""" ) )
lowercase = AutoProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(_lowerCAmelCase , _lowerCAmelCase )
# save in new folder
processor.save_pretrained(_lowerCAmelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , """r""" ) as f:
lowercase = json.load(_lowerCAmelCase )
config_dict.pop("""processor_class""" )
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , """w""" ) as f:
f.write(json.dumps(_lowerCAmelCase ) )
lowercase = AutoProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaFeatureExtractor()
lowercase = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
lowercase = WavaVecaProcessor(_lowerCAmelCase , _lowerCAmelCase )
# save in new folder
processor.save_pretrained(_lowerCAmelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , """r""" ) as f:
lowercase = json.load(_lowerCAmelCase )
config_dict.pop("""processor_class""" )
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , """w""" ) as f:
f.write(json.dumps(_lowerCAmelCase ) )
lowercase = AutoProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(_lowerCAmelCase )
# copy relevant files
copyfile(_lowerCAmelCase , os.path.join(_lowerCAmelCase , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , """w""" ) as f:
f.write("""{}""" )
lowercase = AutoProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> int:
'''simple docstring'''
with self.assertRaises(_lowerCAmelCase ):
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCAmelCase ):
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_lowerCAmelCase )
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_lowerCAmelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
lowercase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
lowercase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_lowerCAmelCase , use_fast=_lowerCAmelCase )
lowercase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def _a ( self ) -> Any:
'''simple docstring'''
try:
AutoConfig.register("""custom""" , _lowerCAmelCase )
AutoFeatureExtractor.register(_lowerCAmelCase , _lowerCAmelCase )
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase )
AutoProcessor.register(_lowerCAmelCase , _lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCAmelCase ):
AutoProcessor.register(_lowerCAmelCase , _lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase = CustomFeatureExtractor.from_pretrained(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(_lowerCAmelCase , """vocab.txt""" )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(_lowerCAmelCase )
lowercase = CustomProcessor(_lowerCAmelCase , _lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_lowerCAmelCase )
lowercase = AutoProcessor.from_pretrained(_lowerCAmelCase )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _a ( self ) -> int:
'''simple docstring'''
class __UpperCamelCase (_UpperCAmelCase ):
__A = False
class __UpperCamelCase (_UpperCAmelCase ):
__A = False
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''AutoFeatureExtractor'''
__A = '''AutoTokenizer'''
__A = False
try:
AutoConfig.register("""custom""" , _lowerCAmelCase )
AutoFeatureExtractor.register(_lowerCAmelCase , _lowerCAmelCase )
AutoTokenizer.register(_lowerCAmelCase , slow_tokenizer_class=_lowerCAmelCase )
AutoProcessor.register(_lowerCAmelCase , _lowerCAmelCase )
# If remote code is not set, the default is to use local classes.
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_lowerCAmelCase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
lowercase = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_lowerCAmelCase )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _a ( self ) -> str:
'''simple docstring'''
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class __UpperCamelCase (unittest.TestCase ):
__A = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def _a ( cls ) -> Union[str, Any]:
'''simple docstring'''
lowercase = TOKEN
HfFolder.save_token(_lowerCAmelCase )
@classmethod
def _a ( cls ) -> Optional[Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def _a ( self ) -> str:
'''simple docstring'''
lowercase = WavaVecaProcessor.from_pretrained(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_lowerCAmelCase , """test-processor""" ) , push_to_hub=_lowerCAmelCase , use_auth_token=self._token )
lowercase = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase , getattr(new_processor.feature_extractor , _lowerCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = WavaVecaProcessor.from_pretrained(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_lowerCAmelCase , """test-processor-org""" ) , push_to_hub=_lowerCAmelCase , use_auth_token=self._token , organization="""valid_org""" , )
lowercase = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_lowerCAmelCase , getattr(new_processor.feature_extractor , _lowerCAmelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _a ( self ) -> Tuple:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
lowercase = CustomFeatureExtractor.from_pretrained(_lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = os.path.join(_lowerCAmelCase , """vocab.txt""" )
with open(_lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowercase = CustomTokenizer(_lowerCAmelCase )
lowercase = CustomProcessor(_lowerCAmelCase , _lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token )
lowercase = Repository(_lowerCAmelCase , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token )
processor.save_pretrained(_lowerCAmelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_lowerCAmelCase , """tokenizer_config.json""" ) ) as f:
lowercase = json.load(_lowerCAmelCase )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_lowerCAmelCase , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_lowerCAmelCase , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_lowerCAmelCase , """custom_processing.py""" ) ) )
repo.push_to_hub()
lowercase = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=_lowerCAmelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 716 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
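# Entry point: parse TensorFlowBenchmarkArguments from the command line, translating deprecated --no_* flags into a helpful error, then run the benchmark.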
def SCREAMING_SNAKE_CASE ( ):
lowercase = HfArgumentParser(lowercase_ )
lowercase = parser.parse_args_into_dataclasses()[0]
lowercase = TensorFlowBenchmark(args=lowercase_ )
try:
lowercase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowercase = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
lowercase = """ """.join(str(lowercase_ ).split(""" """ )[:-1] )
lowercase = """"""
lowercase = eval(str(lowercase_ ).split(""" """ )[-1] )
lowercase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase_ )
if len(lowercase_ ) > 0:
lowercase = full_error_msg + begin_error_msg + str(lowercase_ )
raise ValueError(lowercase_ )
benchmark.run()
if __name__ == "__main__":
main()
| 653 | 0 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowercase_ : Dict = logging.get_logger(__name__)
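# Decorator factory: wraps a model call to run either eagerly or as a graph-compiled tf.function (with optional XLA), depending on the benchmark arguments.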
def SCREAMING_SNAKE_CASE ( lowercase_ : bool , lowercase_ : bool ):
def run_func(lowercase_ : Union[str, Any] ):
@wraps(lowercase_ )
def run_in_eager_mode(*lowercase_ : List[Any] , **lowercase_ : Optional[int] ):
return func(*lowercase_ , **lowercase_ )
@wraps(lowercase_ )
@tf.function(experimental_compile=lowercase_ )
def run_in_graph_mode(*lowercase_ : str , **lowercase_ : Union[str, Any] ):
return func(*lowercase_ , **lowercase_ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
"""Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
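# Build a batch of random token ids in [0, vocab_size) with shape (batch_size, sequence_length) for benchmarking.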
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : int ):
lowercase = random.Random()
lowercase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(lowercase_ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class __UpperCamelCase (_UpperCAmelCase ):
__A = 42
__A = 42
__A = '''TensorFlow'''
@property
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
return tf.__version__
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> float:
'''simple docstring'''
lowercase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
lowercase = self._prepare_inference_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return self._measure_speed(_inference )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> float:
'''simple docstring'''
lowercase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
lowercase = self._prepare_train_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return self._measure_speed(_train )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> [Memory, Optional[MemorySummary]]:
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _lowerCAmelCase )
lowercase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
lowercase = self._prepare_inference_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return self._measure_memory(_inference )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> [Memory, Optional[MemorySummary]]:
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _lowerCAmelCase )
lowercase = self.args.strategy
if strategy is None:
raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
lowercase = self._prepare_train_func(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return self._measure_memory(_train )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Callable[[], None]:
'''simple docstring'''
lowercase = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
lowercase = (
hasattr(_lowerCAmelCase , """architectures""" )
and isinstance(config.architectures , _lowerCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowercase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
lowercase = __import__("""transformers""" , fromlist=[model_class] )
lowercase = getattr(_lowerCAmelCase , _lowerCAmelCase )
lowercase = model_cls(_lowerCAmelCase )
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
lowercase = TF_MODEL_MAPPING[config.__class__](_lowerCAmelCase )
# encoder-decoder has vocab size saved differently
lowercase = config.vocab_size if hasattr(_lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size
lowercase = random_input_ids(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase , training=_lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(_lowerCAmelCase , training=_lowerCAmelCase )
lowercase = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Callable[[], None]:
'''simple docstring'''
lowercase = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
if self.args.fpaa:
raise NotImplementedError("""Mixed precision is currently not supported.""" )
lowercase = (
hasattr(_lowerCAmelCase , """architectures""" )
and isinstance(config.architectures , _lowerCAmelCase )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
lowercase = """TF""" + config.architectures[0] # prepend 'TF' for tensorflow model
lowercase = __import__("""transformers""" , fromlist=[model_class] )
lowercase = getattr(_lowerCAmelCase , _lowerCAmelCase )
lowercase = model_cls(_lowerCAmelCase )
except ImportError:
raise ImportError(
F"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
""" set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
else:
lowercase = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_lowerCAmelCase )
# encoder-decoder has vocab size saved differently
lowercase = config.vocab_size if hasattr(_lowerCAmelCase , """vocab_size""" ) else config.encoder.vocab_size
lowercase = random_input_ids(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
lowercase = model(_lowerCAmelCase , decoder_input_ids=_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )[0]
lowercase = tf.gradients(_lowerCAmelCase , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase , training=_lowerCAmelCase )[0]
lowercase = tf.gradients(_lowerCAmelCase , model.trainable_variables )
return gradients
lowercase = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
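# Time the prepared callable with timeit, warming up first on TPU/XLA; the minimum over repeats (normalized by the 10 inner runs) is reported.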
def _a ( self , _lowerCAmelCase ) -> float:
'''simple docstring'''
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run the model 5 extra times to stabilize compilation for tpu
logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
timeit.repeat(_lowerCAmelCase , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
lowercase = timeit.repeat(
_lowerCAmelCase , repeat=self.args.repeat , number=10 , )
return min(_lowerCAmelCase ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
def _a ( self , _lowerCAmelCase ) -> [Memory, MemorySummary]:
'''simple docstring'''
logger.info(
"""Note that TensorFlow allocates more memory than """
"""it might need to speed up computation. """
"""The memory reported here corresponds to the memory """
"""reported by `nvidia-smi`, which can vary depending """
"""on total available memory on the GPU that is used.""" )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
"""`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
""" consumption line by line.""" )
lowercase = start_memory_tracing("""transformers""" )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
"""Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
""" with `args.memory=False`""" )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
"""py3nvml not installed, we won't log GPU memory usage. """
"""Install py3nvml (pip install py3nvml) to log information about GPU.""" )
lowercase = """N/A"""
else:
logger.info(
"""Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
""" running on the same GPU.""" )
# init nvml
nvml.nvmlInit()
func()
lowercase = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
lowercase = nvml.nvmlDeviceGetMemoryInfo(_lowerCAmelCase )
lowercase = meminfo.used
lowercase = Memory(_lowerCAmelCase )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
"""When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
""" TensorFlow.""" )
lowercase = None
else:
lowercase = measure_peak_memory_cpu(_lowerCAmelCase )
lowercase = Memory(_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else memory_bytes
if self.args.trace_memory_line_by_line:
lowercase = stop_memory_tracing(_lowerCAmelCase )
if memory is None:
lowercase = summary.total
else:
lowercase = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 717 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowercase_ : List[str] = '''3'''
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 653 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
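# Convert a GPTSAN TensorFlow checkpoint into a PyTorch state dict, remapping every tensor to its Hugging Face parameter name and transposing kernels where the layouts differ.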
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] ):
lowercase = os.path.join(args.tf_model_dir , """parameters.json""" )
lowercase = json.loads(open(lowercase_ ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(""".pt""" ):
lowercase = args.output + """.pt"""
lowercase = OrderedDict()
with tf.device("""/CPU:0""" ):
lowercase = tf.train.load_checkpoint(args.tf_model_dir )
lowercase = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase = reader.get_tensor(lowercase_ ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
lowercase = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
lowercase = 8
lowercase = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/moe""" ):
lowercase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/softmlp/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
lowercase = key_name[-9:-7]
for i in range(16 ):
lowercase = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
lowercase = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-TensorFlow the experts are stored in one array, so it is split per expert
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/mlp""" ):
lowercase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/p1/bias""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/p2/kernel""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/p2/bias""" ):
lowercase = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/ln""" ):
lowercase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase = """model.blocks.%d.feed_forward.norm.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/g""" ):
lowercase = """model.blocks.%d.feed_forward.norm.weight""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/att""" ):
lowercase = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
lowercase = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase = state[:, 0, :, :]
lowercase = state[:, 1, :, :]
lowercase = state[:, 2, :, :]
lowercase = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
lowercase = torch.tensor(lowercase_ )
lowercase = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
lowercase = torch.tensor(lowercase_ )
lowercase = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/o/kernel""" ):
lowercase = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
lowercase = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/an""" ):
lowercase = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase = """model.blocks.%d.self_attn.norm.bias""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif key_name.endswith("""/g""" ):
lowercase = """model.blocks.%d.self_attn.norm.weight""" % player
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
lowercase = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
lowercase = """model.%s.weight""" % nlayer
lowercase = vnp.copy() # same in embedded
lowercase = torch.tensor(lowercase_ )
if key_name.startswith("""model/wte""" ):
lowercase = """lm_head.weight"""
lowercase = vnp.copy() # same in embedded
lowercase = torch.tensor(lowercase_ )
elif key_name.startswith("""model/wob""" ):
lowercase = """final_logits_bias"""
lowercase = vnp.copy() # same in embedded
lowercase = state.reshape((1, -1) )
lowercase = torch.tensor(lowercase_ )
elif key_name == "model/dense/kernel":
lowercase = """model.last_project.weight"""
lowercase = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase = torch.tensor(lowercase_ )
elif key_name == "model/dense_1/bias":
lowercase = """model.last_project.bias"""
lowercase = vnp.copy() # same because it is one dimensional
lowercase = torch.tensor(lowercase_ )
torch.save(lowercase_ , args.output )
if __name__ == "__main__":
lowercase_ : int = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ : str = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 718 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {'''vocab_file''': '''spm_char.model'''}
lowercase_ : int = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
lowercase_ : Optional[Any] = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self ) -> str:
'''simple docstring'''
lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = self.sp_model.IdToPiece(_lowerCAmelCase )
return token
def _a ( self , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = []
lowercase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowercase = []
else:
current_sub_tokens.append(_lowerCAmelCase )
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
lowercase = [1]
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + suffix_ones
return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
| 653 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
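# Return the (key, out, query, value) projection kernels of one attention layer from the T5X parameter dict.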
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : int="attention" ):
lowercase = params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
lowercase = params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
lowercase = params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
lowercase = params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
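# Return the MLP input/output kernels; v1.1 checkpoints split wi into wi_0/wi_1 for the gated activation.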
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : int , lowercase_ : Any , lowercase_ : Optional[Any]=False ):
if split_mlp_wi:
lowercase = params[F"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
lowercase = params[F"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
lowercase = (wi_a, wi_a)
else:
lowercase = params[F"""{prefix}/layers_{i}/mlp/wi/kernel"""]
lowercase = params[F"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : int ):
return params[F"""{prefix}/layers_{i}/{layer_name}/scale"""]
def SCREAMING_SNAKE_CASE ( lowercase_ : dict , *, lowercase_ : int , lowercase_ : bool ):
lowercase = traverse_util.flatten_dict(variables["""target"""] )
lowercase = {"""/""".join(lowercase_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowercase_ )
lowercase = collections.OrderedDict()
# Shared embeddings.
lowercase = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowercase_ ):
# Block i, layer 0 (Self Attention).
lowercase = tax_layer_norm_lookup(lowercase_ , lowercase_ , """encoder""" , """pre_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowercase_ , lowercase_ , """encoder""" , """attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 1 (MLP).
lowercase = tax_layer_norm_lookup(lowercase_ , lowercase_ , """encoder""" , """pre_mlp_layer_norm""" )
lowercase , lowercase = tax_mlp_lookup(lowercase_ , lowercase_ , """encoder""" , lowercase_ )
lowercase = layer_norm
if split_mlp_wi:
lowercase = wi[0].T
lowercase = wi[1].T
else:
lowercase = wi.T
lowercase = wo.T
lowercase = old[
"""encoder/relpos_bias/rel_embedding"""
].T
lowercase = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(lowercase_ ):
# Block i, layer 0 (Self Attention).
lowercase = tax_layer_norm_lookup(lowercase_ , lowercase_ , """decoder""" , """pre_self_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowercase_ , lowercase_ , """decoder""" , """self_attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 1 (Cross Attention).
lowercase = tax_layer_norm_lookup(lowercase_ , lowercase_ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowercase , lowercase , lowercase , lowercase = tax_attention_lookup(lowercase_ , lowercase_ , """decoder""" , """encoder_decoder_attention""" )
lowercase = layer_norm
lowercase = k.T
lowercase = o.T
lowercase = q.T
lowercase = v.T
# Block i, layer 2 (MLP).
lowercase = tax_layer_norm_lookup(lowercase_ , lowercase_ , """decoder""" , """pre_mlp_layer_norm""" )
lowercase , lowercase = tax_mlp_lookup(lowercase_ , lowercase_ , """decoder""" , lowercase_ )
lowercase = layer_norm
if split_mlp_wi:
lowercase = wi[0].T
lowercase = wi[1].T
else:
lowercase = wi.T
lowercase = wo.T
lowercase = old["""decoder/decoder_norm/scale"""]
lowercase = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase = old["""decoder/logits_dense/kernel"""].T
return new
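# Turn the converted numpy arrays into torch tensors and backfill embedding/lm_head entries that older checkpoints share with shared.weight.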
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : bool ):
lowercase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowercase = state_dict["""shared.weight"""]
return state_dict
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Any ):
lowercase = checkpoints.load_tax_checkpoint(lowercase_ )
lowercase = convert_tax_to_pytorch(lowercase_ , num_layers=config.num_layers , is_encoder_only=lowercase_ )
lowercase = make_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ , strict=lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : bool = False ):
lowercase = TaConfig.from_json_file(lowercase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase = TaEncoderModel(lowercase_ )
else:
lowercase = TaForConditionalGeneration(lowercase_ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowercase_ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase_ )
print("""Done""" )
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Set if the checkpoint is an encoder-only model.''', default=False
)
lowercase_ : Union[str, Any] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 719 |
'''simple docstring'''
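# Concatenate the positive integers into Champernowne's constant (0.123456789101112...) and multiply the digits at positions 1, 10, 100, ..., 1_000_000.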
def SCREAMING_SNAKE_CASE ( ):
lowercase = []
lowercase = 1
while len(lowercase_ ) < 1E6:
constant.append(str(lowercase_ ) )
i += 1
lowercase = """""".join(lowercase_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
| 653 | 0 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __UpperCamelCase (_UpperCAmelCase ):
__A = DistilBertTokenizer
__A = DistilBertTokenizerFast
__A = True
@slow
def _a ( self ) -> Optional[int]:
'''simple docstring'''
lowercase = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=_lowerCAmelCase )
lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_lowerCAmelCase )
lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase )
lowercase = tokenizer.build_inputs_with_special_tokens(_lowerCAmelCase , _lowerCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 720 |
'''simple docstring'''
import os
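# Read newline-separated integers from num.txt next to this script and return the first ten digits of their sum.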
def SCREAMING_SNAKE_CASE ( ):
lowercase = os.path.join(os.path.dirname(lowercase_ ) , """num.txt""" )
with open(lowercase_ ) as file_hand:
return str(sum(int(lowercase_ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 653 | 0 |
'''simple docstring'''
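# Return True if all characters in the string are distinct, using an int as a bitmap with one bit per unicode code point.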
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = 0
for ch in input_str:
lowercase = ord(lowercase_ )
lowercase = pow(2 , lowercase_ )
# If the bit for the current character's unicode code point is already on
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = StableDiffusionPanoramaPipeline
__A = TEXT_TO_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_BATCH_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase = DDIMScheduler()
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase = CLIPTextModel(_lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> int:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = """french fries"""
lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _a ( self ) -> str:
'''simple docstring'''
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> Any:
'''simple docstring'''
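# Check that the pipeline invokes the callback once per denoising step and that intermediate latents match the reference slices.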
lowercase = 0
def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase = False
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase )
lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 653 | 0 |
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
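# Compute a job's duration in minutes from the started_at/completed_at timestamps in a GitHub Actions job payload.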
def SCREAMING_SNAKE_CASE ( lowercase_ : Any ):
lowercase = {}
lowercase = job["""started_at"""]
lowercase = job["""completed_at"""]
lowercase = date_parser.parse(lowercase_ )
lowercase = date_parser.parse(lowercase_ )
lowercase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
lowercase = start
lowercase = end
lowercase = duration_in_min
return job_info
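# Fetch every job of a workflow run from the GitHub API (100 per page, following pagination) and map each job name to its timing info.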
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : List[Any]=None ):
lowercase = None
if token is not None:
lowercase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
lowercase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
lowercase = requests.get(lowercase_ , headers=lowercase_ ).json()
lowercase = {}
try:
job_time.update({job["""name"""]: extract_time_from_single_job(lowercase_ ) for job in result["""jobs"""]} )
lowercase = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(lowercase_ ):
lowercase = requests.get(url + F"""&page={i + 2}""" , headers=lowercase_ ).json()
job_time.update({job["""name"""]: extract_time_from_single_job(lowercase_ ) for job in result["""jobs"""]} )
return job_time
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
if __name__ == "__main__":
lowercase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
lowercase_ : Union[str, Any] = parser.parse_args()
lowercase_ : str = get_job_time(args.workflow_run_id)
lowercase_ : List[str] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v["duration"]}''')
| 700 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowercase_ : Tuple = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the encoder.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class __UpperCamelCase :
__A = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
__A = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
__A = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
__A = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__A = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
__A = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] ):
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(lowercase_ , os.path.join(lowercase_ , F"""{split}_results.json""" ) )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
check_output_dir(lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , lowercase_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(lowercase_ , lowercase_ , lowercase_ ):
assert hasattr(lowercase_ , lowercase_ ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(lowercase_ , lowercase_ , getattr(lowercase_ , lowercase_ ) )
lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=lowercase_ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(lowercase_ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(lowercase_ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(lowercase_ , lowercase_ ):
lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(lowercase_ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase = SeqaSeqDataset
# Get datasets
lowercase = (
dataset_class(
lowercase_ , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase = (
dataset_class(
lowercase_ , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase = (
build_compute_metrics_fn(data_args.task , lowercase_ ) if training_args.predict_with_generate else None
)
lowercase = SeqaSeqTrainer(
model=lowercase_ , args=lowercase_ , data_args=lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , data_collator=SeqaSeqDataCollator(
lowercase_ , lowercase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
lowercase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase = train_result.metrics
lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase = trainer.evaluate(metric_key_prefix="""val""" )
lowercase = data_args.n_val
lowercase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
lowercase = trainer.predict(test_dataset=lowercase_ , metric_key_prefix="""test""" )
lowercase = test_output.metrics
lowercase = data_args.n_test
if trainer.is_world_process_zero():
lowercase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , lowercase_ , training_args.output_dir )
all_metrics.update(lowercase_ )
if training_args.predict_with_generate:
lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ )
lowercase = lmap(str.strip , lowercase_ )
write_txt_file(lowercase_ , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(lowercase_ , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
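# A worked sketch of the mBART decoder_start_token_id logic in main() above
# (the ids below are illustrative examples, not read from a real tokenizer):
# generation must start from the target-language code, looked up via
# lang_code_to_id.
lang_code_to_id = {"en_XX": 250004, "ro_RO": 250020}  # assumed example ids
tgt_lang = "ro_RO"
decoder_start_token_id = lang_code_to_id[tgt_lang]
assert decoder_start_token_id == 250020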
| 653 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
lowercase_ : List[str] = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowercase_ : List[str] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def SCREAMING_SNAKE_CASE ( lowercase_ : list[list[int]] ):
lowercase = []
for i in range(len(lowercase_ ) ):
lowercase = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowercase = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(lowercase_ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(lowercase_ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(lowercase_ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowercase = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(lowercase_ )
return next_generation
def SCREAMING_SNAKE_CASE ( lowercase_ : list[list[int]] , lowercase_ : int ):
lowercase = []
for _ in range(lowercase_ ):
# Create output image
lowercase = Image.new("""RGB""" , (len(cells[0] ), len(lowercase_ )) )
lowercase = img.load()
# Save cells to image
for x in range(len(lowercase_ ) ):
for y in range(len(cells[0] ) ):
lowercase = 255 - cells[y][x] * 255
lowercase = (colour, colour, colour)
# Save image
images.append(lowercase_ )
lowercase = new_generation(lowercase_ )
return images
if __name__ == "__main__":
lowercase_ : Tuple = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
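# A minimal check of the rules above (assuming the helpers keep their original
# names and the three-cell grid near the top of the file is bound to BLINKER):
# a blinker has period 2, so two generations restore the starting grid.
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
flipped = new_generation(BLINKER)
assert flipped == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert new_generation(flipped) == BLINKER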
| 701 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCAmelCase )
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__(*_lowerCAmelCase , **_lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def _a ( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> str:
'''simple docstring'''
lowercase = {}
lowercase = {}
if prompt is not None:
lowercase = prompt
if generate_kwargs is not None:
lowercase = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowercase = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowercase = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , _lowerCAmelCase , **_lowerCAmelCase ) -> Any:
'''simple docstring'''
return super().__call__(_lowerCAmelCase , **_lowerCAmelCase )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
lowercase = load_image(_lowerCAmelCase )
if prompt is not None:
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
F"""Received an invalid text input, got - {type(_lowerCAmelCase )} - but expected a single string. """
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowercase = self.model.config.model_type
if model_type == "git":
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(text=_lowerCAmelCase , add_special_tokens=_lowerCAmelCase ).input_ids
lowercase = [self.tokenizer.cls_token_id] + input_ids
lowercase = torch.tensor(_lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowercase = self.image_processor(images=_lowerCAmelCase , header_text=_lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
lowercase = self.tokenizer(_lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(_lowerCAmelCase )
else:
raise ValueError(F"""Model type {model_type} does not support conditional text generation""" )
else:
lowercase = self.image_processor(images=_lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowercase = None
return model_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _lowerCAmelCase )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowercase = None
if generate_kwargs is None:
lowercase = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowercase = model_inputs.pop(self.model.main_input_name )
lowercase = self.model.generate(_lowerCAmelCase , **_lowerCAmelCase , **_lowerCAmelCase )
return model_outputs
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = []
for output_ids in model_outputs:
lowercase = {
"""generated_text""": self.tokenizer.decode(
_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , )
}
records.append(_lowerCAmelCase )
return records
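# A minimal usage sketch for the pipeline above (the checkpoint id is an
# assumption; any image-to-text model on the Hub works):
from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# e.g. [{'generated_text': 'two parrots are standing next to each other'}]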
| 653 | 0 |
from manim import *
class __UpperCamelCase (_UpperCAmelCase ):
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = Rectangle(height=0.5 , width=0.5 )
lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase = Rectangle(height=0.25 , width=0.25 )
lowercase = [mem.copy() for i in range(6 )]
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = Text("""CPU""" , font_size=24 )
lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
lowercase = [mem.copy() for i in range(4 )]
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = Text("""GPU""" , font_size=24 )
lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCAmelCase )
lowercase = [mem.copy() for i in range(6 )]
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = Text("""Model""" , font_size=24 )
lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCAmelCase )
lowercase = []
lowercase = []
for i, rect in enumerate(_lowerCAmelCase ):
lowercase = fill.copy().set_fill(_lowerCAmelCase , opacity=0.8 )
target.move_to(_lowerCAmelCase )
model_arr.append(_lowerCAmelCase )
lowercase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_lowerCAmelCase )
self.add(*_lowerCAmelCase , *_lowerCAmelCase )
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = [meta_mem.copy() for i in range(6 )]
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = VGroup(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0 )
lowercase = Text("""Disk""" , font_size=24 )
lowercase = Group(_lowerCAmelCase , _lowerCAmelCase ).arrange(_lowerCAmelCase , buff=0.5 , aligned_edge=_lowerCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
lowercase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCAmelCase , _lowerCAmelCase )
lowercase = MarkupText(
F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(_lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowerCAmelCase )
lowercase = MarkupText(
F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ) )
lowercase = Square(0.3 )
input.set_fill(_lowerCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , _lowerCAmelCase , buff=0.5 )
self.play(Write(_lowerCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=_lowerCAmelCase , buff=0.02 )
self.play(MoveToTarget(_lowerCAmelCase ) )
self.play(FadeOut(_lowerCAmelCase ) )
lowercase = Arrow(start=_lowerCAmelCase , end=_lowerCAmelCase , color=_lowerCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , _lowerCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowercase = MarkupText(
F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase , run_time=3 ) )
lowercase = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(_lowerCAmelCase ) , Circumscribe(model_arr[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowercase = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , _lowerCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowercase = AnimationGroup(
FadeOut(_lowerCAmelCase , run_time=0.5 ) , MoveToTarget(_lowerCAmelCase , run_time=0.5 ) , FadeIn(_lowerCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(_lowerCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowercase = 0.7
self.play(
Circumscribe(model_arr[i] , **_lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_lowerCAmelCase , **_lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=_lowerCAmelCase , **_lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowercase = a_c
lowercase = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(_lowerCAmelCase ) , FadeOut(_lowerCAmelCase , run_time=0.5 ) , )
lowercase = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase , run_time=3 ) , MoveToTarget(_lowerCAmelCase ) )
self.wait()
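# To render a scene like the one above (assuming the file is saved as
# big_model_inference.py and the class keeps a readable name such as Stage1):
#   manim -pql big_model_inference.py Stage1
# -p previews the result, -ql renders at low quality for fast iteration.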
| 702 |
'''simple docstring'''
from ... import PretrainedConfig
lowercase_ : int = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
__A = '''nezha'''
def __init__( self , _lowerCAmelCase=2_1128 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=64 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-12 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = max_relative_position
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = classifier_dropout
lowercase = use_cache
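# A minimal usage sketch (assuming the class above keeps its original name
# NezhaConfig): every argument falls back to the defaults in __init__.
config = NezhaConfig(vocab_size=21128, hidden_size=768, num_hidden_layers=12)
assert config.max_relative_position == 64  # window of Nezha's relative position encoding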
| 653 | 0 |
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : int ):
# prepare kernel
    # the kernel size has to be odd
if (ksize % 2) == 0:
lowercase = ksize + 1
lowercase = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(lowercase_ ):
for x in range(lowercase_ ):
# distance from center
lowercase = x - ksize // 2
lowercase = y - ksize // 2
            # degree to radian
lowercase = theta / 180 * np.pi
lowercase = np.cos(_theta )
lowercase = np.sin(_theta )
# get kernel x
lowercase = cos_theta * px + sin_theta * py
# get kernel y
lowercase = -sin_theta * px + cos_theta * py
# fill kernel
lowercase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
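# The loop above fills the kernel with the real Gabor function. A sketch of the
# formula it implements, in the code's own symbols: with rotated coordinates
#   _x =  cos(theta) * px + sin(theta) * py
#   _y = -sin(theta) * px + cos(theta) * py
# each entry is
#   gabor[y][x] = exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2))
#                 * cos(2 * pi * _x / lambd + psi)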
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
lowercase_ : List[Any] = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
lowercase_ : str = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
lowercase_ : Union[str, Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
lowercase_ : Dict = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
lowercase_ : Tuple = out / out.max() * 255
lowercase_ : Dict = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
| 703 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowercase_ : Tuple = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = git.Repo(search_parent_directories=lowercase_ )
lowercase = {
"""repo_id""": str(lowercase_ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(lowercase_ , """git_log.json""" ) , """w""" ) as f:
json.dump(lowercase_ , lowercase_ , indent=4 )
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
if params.n_gpu <= 0:
lowercase = 0
lowercase = -1
lowercase = True
lowercase = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
lowercase = int(os.environ["""WORLD_SIZE"""] )
lowercase = int(os.environ["""N_GPU_NODE"""] )
lowercase = int(os.environ["""RANK"""] )
# number of nodes / node ID
lowercase = params.world_size // params.n_gpu_per_node
lowercase = params.global_rank // params.n_gpu_per_node
lowercase = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowercase = 1
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 1
lowercase = 1
lowercase = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowercase = params.node_id == 0 and params.local_rank == 0
lowercase = params.n_nodes > 1
# summary
lowercase = F"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
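# A worked example of the rank bookkeeping above (assumed layout: 2 nodes with
# 4 GPUs each): WORLD_SIZE=8 and N_GPU_NODE=4, so for RANK (global_rank) = 6
#   n_nodes = 8 // 4 = 2
#   node_id = 6 // 4 = 1
# and with local_rank = 2 the sanity check
#   0 <= local_rank <= global_rank < world_size
# holds, while is_master is True only for node_id == 0 and local_rank == 0.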
| 653 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Dict:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
lowercase = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
lowercase = dict(scheduler.config )
lowercase = 1
lowercase = FrozenDict(_lowerCAmelCase )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
lowercase = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , _lowerCAmelCase , standard_warn=_lowerCAmelCase )
lowercase = dict(scheduler.config )
lowercase = True
lowercase = FrozenDict(_lowerCAmelCase )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=_lowerCAmelCase , segmentation_processor=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , )
def _a ( self , _lowerCAmelCase = "auto" ) -> Optional[int]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
self.enable_attention_slicing(_lowerCAmelCase )
def _a ( self ) -> Tuple:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowercase = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCAmelCase , _lowerCAmelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 512 , _lowerCAmelCase = 512 , _lowerCAmelCase = 50 , _lowerCAmelCase = 7.5 , _lowerCAmelCase = None , _lowerCAmelCase = 1 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = 1 , **_lowerCAmelCase , ) -> Dict:
'''simple docstring'''
lowercase = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
lowercase = self.segmentation_model(**_lowerCAmelCase )
lowercase = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase = self.numpy_to_pil(_lowerCAmelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , mask_image=_lowerCAmelCase , height=_lowerCAmelCase , width=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase , guidance_scale=_lowerCAmelCase , negative_prompt=_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase , eta=_lowerCAmelCase , generator=_lowerCAmelCase , latents=_lowerCAmelCase , output_type=_lowerCAmelCase , return_dict=_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=_lowerCAmelCase , )
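# A minimal usage sketch (the checkpoint ids and the community-pipeline name
# are assumptions based on public Hub checkpoints, not guaranteed by the code
# above): CLIPSeg turns `text` into a mask, and Stable Diffusion inpaints
# `prompt` into that region.
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
from diffusers import DiffusionPipeline

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=model,
    segmentation_processor=processor,
).to("cuda")

init_image = Image.open("bench.png").convert("RGB").resize((512, 512))  # assumed local image
result = pipe(image=init_image, text="a bench", prompt="a park bench covered in snow").images[0]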
| 704 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
lowercase_ : List[str] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowercase_ : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
lowercase_ : Union[str, Any] = os.environ.get('''USER_TOKEN''', '''''')
def SCREAMING_SNAKE_CASE ( lowercase_ : str ):
lowercase = {
"""Authorization""": F"""token {auth_token}""",
"""Accept""": """application/vnd.github.v3+json""",
}
return requests.get(lowercase_ , headers=lowercase_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 653 | 0 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( lowercase_ : list[int] ):
return len(set(lowercase_ ) ) == len(lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
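# A minimal sketch of the set-based check above: building a set drops
# duplicates, so the lengths differ exactly when the list repeats a value.
assert len(set([1, 2, 3, 3])) == 3 < len([1, 2, 3, 3])  # duplicate present
assert len(set([1, 2, 3])) == len([1, 2, 3])            # all unique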
| 705 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ : Union[str, Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def SCREAMING_SNAKE_CASE ( lowercase_ : np.ndarray , lowercase_ : float , lowercase_ : int = 1_6000 ):
lowercase = int(round(sample_rate * max_length ) )
if len(lowercase_ ) <= sample_length:
return wav
lowercase = randint(0 , len(lowercase_ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
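# A worked example of the subsampling above (assuming 16 kHz audio and
# max_length=20, the script's default max_length_seconds):
# sample_length = int(round(16000 * 20)) = 320000, so a 25 s clip
# (400000 samples) is cut at a random offset drawn from randint(0, 79999),
# while clips of 20 s or less are returned unchanged.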
@dataclass
class __UpperCamelCase :
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
__A = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
__A = field(
default='''validation''' , metadata={
'''help''': (
                '''The name of the evaluation data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
__A = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
__A = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
__A = field(
default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class __UpperCamelCase :
__A = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
__A = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""will be removed in a future version. Use `--freeze_feature_encoder`"""
"""instead. Setting `freeze_feature_encoder==True`.""" , _lowerCAmelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"""The argument `--freeze_feature_extractor` is deprecated and """
"""should not be used in combination with `--freeze_feature_encoder`."""
"""Only make use of `--freeze_feature_encoder`.""" )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowercase_ , lowercase_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase = training_args.get_process_log_level()
logger.setLevel(lowercase_ )
transformers.utils.logging.set_verbosity(lowercase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
lowercase = DatasetDict()
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
"""Make sure to set `--label_column_name` to the correct text column - one of """
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
lowercase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
lowercase = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
lowercase = feature_extractor.model_input_names[0]
def train_transforms(lowercase_ : int ):
lowercase = []
for audio in batch[data_args.audio_column_name]:
lowercase = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowercase_ )
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowercase_ : Dict ):
lowercase = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
lowercase = feature_extractor(lowercase_ , sampling_rate=feature_extractor.sampling_rate )
lowercase = {model_input_name: inputs.get(lowercase_ )}
lowercase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowercase = raw_datasets["""train"""].features[data_args.label_column_name].names
lowercase , lowercase = {}, {}
for i, label in enumerate(lowercase_ ):
lowercase = str(lowercase_ )
lowercase = label
# Load the accuracy metric from the datasets package
lowercase = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowercase_ : Tuple ):
lowercase = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowercase_ , references=eval_pred.label_ids )
lowercase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowercase_ ) , labelaid=lowercase_ , idalabel=lowercase_ , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowercase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
lowercase = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowercase_ , output_all_columns=lowercase_ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowercase = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowercase_ , output_all_columns=lowercase_ )
# Initialize our trainer
lowercase = Trainer(
model=lowercase_ , args=lowercase_ , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowercase_ , tokenizer=lowercase_ , )
# Training
if training_args.do_train:
lowercase = None
if training_args.resume_from_checkpoint is not None:
lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase = last_checkpoint
lowercase = trainer.train(resume_from_checkpoint=lowercase_ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase = trainer.evaluate()
trainer.log_metrics("""eval""" , lowercase_ )
trainer.save_metrics("""eval""" , lowercase_ )
# Write model card and (optionally) push to hub
lowercase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase_ )
else:
trainer.create_model_card(**lowercase_ )
if __name__ == "__main__":
main()
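# A minimal launch sketch (dataset and flags assumed from the example's
# README, not mandated by the code above):
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval --freeze_feature_encoder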
| 653 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
lowercase_ : int = None
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
lowercase_ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase_ : List[Any] = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
lowercase_ : Dict = {
'''camembert-base''': 512,
}
lowercase_ : Union[str, Any] = '''▁'''
class __UpperCamelCase (_UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
__A = CamembertTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<s>NOTUSED", "</s>NOTUSED"] , **_lowerCAmelCase , ) -> Any:
lowercase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = False if not self.vocab_file else True
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase = [self.cls_token_id]
lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ):
copyfile(self.vocab_file , _lowerCAmelCase )
return (out_vocab_file,)
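# A sketch of the special-token layout the methods above build (CamemBERT
# follows RoBERTa's convention):
#   single sequence: <s> A </s>
#   sequence pair:   <s> A </s></s> B </s>
# create_token_type_ids_from_sequences returns all zeros in both cases.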
| 706 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
@dataclass
class __UpperCamelCase (_UpperCAmelCase ):
__A = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__( self , **_lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase = deprecated_arg[3:]
lowercase = not kwargs.pop(_lowerCAmelCase )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
lowercase = kwargs.pop("""tpu_name""" , self.tpu_name )
lowercase = kwargs.pop("""device_idx""" , self.device_idx )
lowercase = kwargs.pop("""eager_mode""" , self.eager_mode )
lowercase = kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**_lowerCAmelCase )
__A = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name of TPU'''} , )
__A = field(
default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , )
__A = field(default=_UpperCAmelCase , metadata={'''help''': '''Benchmark models in eager model.'''} )
__A = field(
default=_UpperCAmelCase , metadata={
'''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'''
} , )
@cached_property
def _a ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
lowercase = None
if self.tpu:
try:
if self.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
lowercase = None
return tpu
@cached_property
def _a ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
lowercase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
lowercase = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
@property
def _a ( self ) -> bool:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def _a ( self ) -> "tf.distribute.Strategy":
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def _a ( self ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def _a ( self ) -> int:
'''simple docstring'''
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def _a ( self ) -> bool:
'''simple docstring'''
return self.n_gpu > 0
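# A minimal sketch of the device selection above (assumptions: TensorFlow
# installed, no TPU attached): the strategy degrades from TPUStrategy to a
# single-GPU OneDeviceStrategy to a CPU OneDeviceStrategy.
import tensorflow as tf

gpus = tf.config.list_physical_devices("GPU")
device = "/gpu:0" if gpus else "/cpu:0"
strategy = tf.distribute.OneDeviceStrategy(device=device)
with strategy.scope():
    pass  # build and run the benchmarked model here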
| 653 | 0 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=2 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=36 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=6 , _lowerCAmelCase=6 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , _lowerCAmelCase=1000 , ) -> Dict:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = image_size
lowercase = patch_size
lowercase = text_seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = coordinate_size
lowercase = shape_size
lowercase = num_labels
lowercase = num_choices
lowercase = scope
lowercase = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowercase = text_seq_length
lowercase = (image_size // patch_size) ** 2 + 1
lowercase = self.text_seq_length + self.image_seq_length
def _a ( self ) -> int:
'''simple docstring'''
lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
lowercase = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase = bbox[i, j, 3]
lowercase = bbox[i, j, 1]
lowercase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase = bbox[i, j, 2]
lowercase = bbox[i, j, 0]
lowercase = t
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.text_seq_length] )
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
lowercase = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = LayoutLMvaModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
# text + image
lowercase = model(_lowerCAmelCase , pixel_values=_lowerCAmelCase )
lowercase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
lowercase = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
lowercase = model(_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowercase = model(pixel_values=_lowerCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = LayoutLMvaForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
'''simple docstring'''
lowercase = self.num_labels
lowercase = LayoutLMvaForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
'''simple docstring'''
lowercase = LayoutLMvaForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , pixel_values=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase = self.prepare_config_and_inputs()
        lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = config_and_inputs
lowercase = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = False
__A = False
__A = False
__A = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A = (
{'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
if is_torch_available()
else {}
)
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return True
def _a ( self ) -> int:
'''simple docstring'''
lowercase = LayoutLMvaModelTester(self )
lowercase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Optional[Any]:
'''simple docstring'''
lowercase = copy.deepcopy(_lowerCAmelCase )
if model_class in get_values(_lowerCAmelCase ):
lowercase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(_lowerCAmelCase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
lowercase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in get_values(_lowerCAmelCase ):
lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in [
*get_values(_lowerCAmelCase ),
]:
lowercase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
elif model_class in [
*get_values(_lowerCAmelCase ),
]:
lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_lowerCAmelCase , )
return inputs_dict
def _a ( self ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self ) -> str:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = LayoutLMvaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( ):
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> int:
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=_lowerCAmelCase ) if is_vision_available() else None
@slow
def _a ( self ) -> int:
'''simple docstring'''
lowercase = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCAmelCase )
lowercase = torch.tensor([[1, 2]] )
lowercase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
lowercase = model(
input_ids=input_ids.to(_lowerCAmelCase ) , bbox=bbox.to(_lowerCAmelCase ) , pixel_values=pixel_values.to(_lowerCAmelCase ) , )
        # verify the final hidden states
lowercase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , _lowerCAmelCase )
lowercase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 707 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : str = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''vit_msn'''
def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-06 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = qkv_bias
| 653 | 0 |
'''simple docstring'''
from collections.abc import Callable
def SCREAMING_SNAKE_CASE ( lowercase_ : Callable[[float], float] , lowercase_ : float , lowercase_ : float ):
lowercase = a
lowercase = b
if function(lowercase_ ) == 0: # one of the a or b is a root for the function
return a
elif function(lowercase_ ) == 0:
return b
elif (
function(lowercase_ ) * function(lowercase_ ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("""could not find root in given interval.""" )
else:
lowercase = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7: # loop until the bracketing interval is narrower than 10^-7
if function(lowercase_ ) == 0:
return mid
elif function(lowercase_ ) * function(lowercase_ ) < 0:
lowercase = mid
else:
lowercase = mid
lowercase = start + (end - start) / 2.0
return mid
def SCREAMING_SNAKE_CASE ( lowercase_ : float ):
return x**3 - 2 * x - 5
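# The sample function above is the classic cubic x^3 - 2x - 5; its single real
# root sits near x = 2.0945, and bisection on [1, 1000] converges to it.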
if __name__ == "__main__":
print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
| 708 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : Union[str, Any] , lowercase_ : str ):
lowercase = """"""
for i in table:
res += inp[i - 1]
return res
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
return data[1:] + data[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Dict ):
lowercase = """"""
for i in range(len(lowercase_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = int("""0b""" + data[0] + data[-1] , 2 )
lowercase = int("""0b""" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
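# In S-DES, the outer bits (first and last) of a 4-bit block select the S-box
# row and the two inner bits select the column, as implemented above.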
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any ):
lowercase = message[:4]
lowercase = message[4:]
lowercase = apply_table(lowercase_ , lowercase_ )
lowercase = xor(lowercase_ , lowercase_ )
lowercase = apply_sbox(lowercase_ , temp[:4] ) # noqa: E741
lowercase = apply_sbox(lowercase_ , temp[4:] )
lowercase = """0""" * (2 - len(lowercase_ )) + l # noqa: E741
lowercase = """0""" * (2 - len(lowercase_ )) + r
lowercase = apply_table(l + r , lowercase_ )
lowercase = xor(lowercase_ , lowercase_ )
return temp + right
if __name__ == "__main__":
lowercase_ : Tuple = input('''Enter 10 bit key: ''')
lowercase_ : Any = input('''Enter 8 bit message: ''')
lowercase_ : Dict = [6, 3, 7, 4, 8, 5, 10, 9]
lowercase_ : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowercase_ : List[Any] = [2, 4, 3, 1]
lowercase_ : List[str] = [2, 6, 3, 1, 4, 8, 5, 7]
lowercase_ : Tuple = [4, 1, 3, 5, 7, 2, 8, 6]
lowercase_ : Optional[Any] = [4, 1, 2, 3, 2, 3, 4, 1]
lowercase_ : List[str] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowercase_ : List[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowercase_ : Union[str, Any] = apply_table(key, paa_table)
lowercase_ : Optional[Any] = temp[:5]
lowercase_ : int = temp[5:]
lowercase_ : List[str] = left_shift(left)
lowercase_ : int = left_shift(right)
lowercase_ : Tuple = apply_table(left + right, pa_table)
lowercase_ : List[str] = left_shift(left)
lowercase_ : Optional[Any] = left_shift(right)
lowercase_ : Union[str, Any] = left_shift(left)
lowercase_ : Union[str, Any] = left_shift(right)
lowercase_ : Optional[int] = apply_table(left + right, pa_table)
# encryption
lowercase_ : int = apply_table(message, IP)
lowercase_ : Dict = function(expansion, sa, sa, keya, temp)
lowercase_ : Any = temp[4:] + temp[:4]
lowercase_ : List[Any] = function(expansion, sa, sa, keya, temp)
lowercase_ : Tuple = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
lowercase_ : List[str] = apply_table(CT, IP)
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = temp[4:] + temp[:4]
lowercase_ : Optional[int] = function(expansion, sa, sa, keya, temp)
lowercase_ : Optional[Any] = apply_table(temp, IP_inv)
    print('''Plain text after decrypting is:''', PT)
| 653 | 0 |
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Tuple ):
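    # Classic Hanoi recursion: park the top height-1 disks on the spare peg,
    # move the largest disk to the target peg, then restack the parked disks.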
if height >= 1:
move_tower(height - 1 , lowercase_ , lowercase_ , lowercase_ )
move_disk(lowercase_ , lowercase_ )
move_tower(height - 1 , lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] , lowercase_ : Optional[int] ):
print("""moving disk from""" , lowercase_ , """to""" , lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = int(input("""Height of hanoi: """ ).strip() )
move_tower(lowercase_ , """A""" , """B""" , """C""" )
if __name__ == "__main__":
main()
| 709 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowercase_ : int = 50_0000
lowercase_ , lowercase_ : Union[str, Any] = os.path.split(__file__)
lowercase_ : Optional[Any] = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Dict ):
lowercase = dataset.map(**lowercase_ )
@get_duration
def SCREAMING_SNAKE_CASE ( lowercase_ : datasets.Dataset , **lowercase_ : Optional[int] ):
lowercase = dataset.filter(**lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
lowercase = generate_example_dataset(
os.path.join(lowercase_ , """dataset.arrow""" ) , lowercase_ , num_examples=lowercase_ )
lowercase = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase_ )
def tokenize(lowercase_ : Dict ):
return tokenizer(examples["""text"""] )
lowercase = map(lowercase_ )
lowercase = map(lowercase_ , batched=lowercase_ )
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""numpy""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""pandas""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
lowercase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
lowercase = map(lowercase_ , function=lowercase_ , batched=lowercase_ )
lowercase = filter(lowercase_ )
        # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase_ , """wb""" ) as f:
f.write(json.dumps(lowercase_ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 653 | 0 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : str ):
lowercase = hf_hub_url(repo_id=lowercase_ , path=lowercase_ , revision=lowercase_ )
assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(lowercase_ )}"""
| 710 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Optional[int] ):
lowercase = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
lowercase = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowercase = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowercase = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowercase = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowercase = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowercase = tf.placeholder("""float64""" , [dim] )
lowercase = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowercase = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowercase = tf.placeholder("""int32""" )
lowercase = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowercase = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowercase = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.placeholder("""float""" , [dim] )
lowercase = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowercase = tf.placeholder("""float""" , [noofclusters] )
lowercase = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowercase = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowercase = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
lowercase = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
# cluster assignment node.
lowercase = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowercase = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
lowercase = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowercase = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowercase = sess.run(lowercase_ )
lowercase = sess.run(lowercase_ )
return centroids, assignments
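# Hedged usage sketch (assumed names, not part of the original file): given a
# list of equal-length float vectors, the function above returns the final
# centroid locations plus one cluster index per input vector, e.g.
#   centroids, assignments = TFKMeansCluster(vectors, noofclusters=3)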
| 653 | 0 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
def __call__( self ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
lowercase = 1
lowercase = self.unet(_lowerCAmelCase , _lowerCAmelCase ).sample
lowercase = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
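        # The subtraction cancels scheduler_output exactly, so the pipeline
        # deterministically returns a tensor of ones (handy for testing).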
lowercase = scheduler_output - scheduler_output + torch.ones_like(_lowerCAmelCase )
return result
| 711 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square(lowercase_ : int , lowercase_ : int ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
lowercase = update_area_of_max_square(lowercase_ , col + 1 )
lowercase = update_area_of_max_square(row + 1 , col + 1 )
lowercase = update_area_of_max_square(row + 1 , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
return sub_problem_sol
else:
return 0
lowercase = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
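# All variants in this file share one recurrence: the largest square anchored
# at (row, col) has side 1 + min(right, diagonal, down) when mat[row][col] == 1,
# and side 0 otherwise.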
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
def update_area_of_max_square_using_dp_array(
lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
lowercase = update_area_of_max_square_using_dp_array(lowercase_ , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowercase_ )
lowercase = update_area_of_max_square_using_dp_array(row + 1 , lowercase_ , lowercase_ )
if mat[row][col]:
lowercase = 1 + min([right, diagonal, down] )
lowercase = max(largest_square_area[0] , lowercase_ )
lowercase = sub_problem_sol
return sub_problem_sol
else:
return 0
lowercase = [0]
lowercase = [[-1] * cols for _ in range(lowercase_ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowercase_ )
return largest_square_area[0]
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [[0] * (cols + 1) for _ in range(rows + 1 )]
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = dp_array[row][col + 1]
lowercase = dp_array[row + 1][col + 1]
lowercase = dp_array[row + 1][col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(dp_array[row][col] , lowercase_ )
else:
lowercase = 0
return largest_square_area
def SCREAMING_SNAKE_CASE ( lowercase_ : int , lowercase_ : int , lowercase_ : list[list[int]] ):
lowercase = [0] * (cols + 1)
lowercase = [0] * (cols + 1)
lowercase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
lowercase = current_row[col + 1]
lowercase = next_row[col + 1]
lowercase = next_row[col]
if mat[row][col] == 1:
lowercase = 1 + min(lowercase_ , lowercase_ , lowercase_ )
lowercase = max(current_row[col] , lowercase_ )
else:
lowercase = 0
lowercase = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ : Any = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : List[Any] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowercase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 712 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''gpt_bigcode'''
__A = ['''past_key_values''']
__A = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowerCAmelCase=5_0257 , _lowerCAmelCase=1024 , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=None , _lowerCAmelCase="gelu_pytorch_tanh" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=5_0256 , _lowerCAmelCase=5_0256 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase = vocab_size
lowercase = n_positions
lowercase = n_embd
lowercase = n_layer
lowercase = n_head
lowercase = n_inner
lowercase = activation_function
lowercase = resid_pdrop
lowercase = embd_pdrop
lowercase = attn_pdrop
lowercase = layer_norm_epsilon
lowercase = initializer_range
lowercase = scale_attn_weights
lowercase = use_cache
lowercase = attention_softmax_in_fpaa
lowercase = scale_attention_softmax_in_fpaa
lowercase = multi_query
lowercase = bos_token_id
lowercase = eos_token_id
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
| 653 | 0 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
        '''The converted tokenizer will be the `slow` tokenizer. To use the fast one, update your `tokenizers` library and re-run the tokenizer conversion'''
)
lowercase_ : List[Any] = None
lowercase_ : Any = {
'''7B''': 1_1008,
'''13B''': 1_3824,
'''30B''': 1_7920,
'''65B''': 2_2016,
'''70B''': 2_8672,
}
lowercase_ : str = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[int] , lowercase_ : Any=1 , lowercase_ : Optional[int]=256 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
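# The helper above implements LLaMA's feed-forward sizing: roughly 8/3 * dim,
# scaled by `ffn_dim_multiplier` when present, then rounded up to a multiple
# of `multiple_of` (256 by default).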
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple ):
with open(lowercase_ , """r""" ) as f:
return json.load(lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : int ):
with open(lowercase_ , """w""" ) as f:
json.dump(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : str=True ):
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowercase = os.path.join(lowercase_ , """tmp""" )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
lowercase = read_json(os.path.join(lowercase_ , """params.json""" ) )
lowercase = NUM_SHARDS[model_size]
lowercase = params["""n_layers"""]
lowercase = params["""n_heads"""]
lowercase = n_heads // num_shards
lowercase = params["""dim"""]
lowercase = dim // n_heads
lowercase = 1_0000.0
lowercase = 1.0 / (base ** (torch.arange(0 , lowercase_ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
lowercase = params["""n_kv_heads"""] # for GQA / MQA
lowercase = n_heads_per_shard // num_key_value_heads
lowercase = dim // num_key_value_heads
else: # compatibility with other checkpoints
lowercase = n_heads
lowercase = n_heads_per_shard
lowercase = dim
# permute for sliced rotary
def permute(lowercase_ : Tuple , lowercase_ : List[Any]=n_heads , lowercase_ : int=dim , lowercase_ : List[str]=dim ):
return w.view(lowercase_ , dima // n_heads // 2 , 2 , lowercase_ ).transpose(1 , 2 ).reshape(lowercase_ , lowercase_ )
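    # The view/transpose/reshape above converts Meta's interleaved rotary
    # weight layout into the half-split layout that the Hugging Face LLaMA
    # attention implementation expects.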
print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
lowercase = torch.load(os.path.join(lowercase_ , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
lowercase = [
torch.load(os.path.join(lowercase_ , F"""consolidated.{i:02d}.pth""" ) , map_location="""cpu""" )
for i in range(lowercase_ )
]
lowercase = 0
lowercase = {"""weight_map""": {}}
for layer_i in range(lowercase_ ):
lowercase = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
lowercase = {
F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
lowercase = {
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.attention_norm.weight"""
].clone(),
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
lowercase = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(lowercase_ , lowercase_ , lowercase_ )
for i in range(lowercase_ )
] , dim=0 , ).reshape(lowercase_ , lowercase_ ) )
lowercase = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view(
lowercase_ , lowercase_ , lowercase_ )
for i in range(lowercase_ )
] , dim=0 , ).reshape(lowercase_ , lowercase_ ) , lowercase_ , lowercase_ , lowercase_ , )
lowercase = torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view(
lowercase_ , lowercase_ , lowercase_ )
for i in range(lowercase_ )
] , dim=0 , ).reshape(lowercase_ , lowercase_ )
lowercase = torch.cat(
[loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(lowercase_ )] , dim=1 )
lowercase = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(lowercase_ )] , dim=0 )
lowercase = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(lowercase_ )] , dim=1 )
lowercase = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(lowercase_ )] , dim=0 )
lowercase = inv_freq
for k, v in state_dict.items():
lowercase = filename
param_count += v.numel()
torch.save(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
lowercase = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
lowercase = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
lowercase = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(lowercase_ )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(lowercase_ )] , dim=0 ),
}
for k, v in state_dict.items():
lowercase = filename
param_count += v.numel()
torch.save(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
# Write configs
lowercase = {"""total_size""": param_count * 2}
write_json(lowercase_ , os.path.join(lowercase_ , """pytorch_model.bin.index.json""" ) )
lowercase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
lowercase = params["""multiple_of"""] if """multiple_of""" in params else 256
lowercase = LlamaConfig(
hidden_size=lowercase_ , intermediate_size=compute_intermediate_size(lowercase_ , lowercase_ , lowercase_ ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=lowercase_ , )
config.save_pretrained(lowercase_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
lowercase = LlamaForCausalLM.from_pretrained(lowercase_ , torch_dtype=torch.floataa , low_cpu_mem_usage=lowercase_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(lowercase_ , safe_serialization=lowercase_ )
shutil.rmtree(lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : str ):
# Initialize the tokenizer based on the `spm` model
lowercase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
lowercase = tokenizer_class(lowercase_ )
tokenizer.save_pretrained(lowercase_ )
def SCREAMING_SNAKE_CASE ( ):
lowercase = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=lowercase_ , help="""Whether or not to save using `safetensors`.""" )
lowercase = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
lowercase = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , lowercase_ )
if __name__ == "__main__":
main()
| 713 |
'''simple docstring'''
import requests
def SCREAMING_SNAKE_CASE ( lowercase_ : str , lowercase_ : str ):
lowercase = {"""Content-Type""": """application/json"""}
lowercase = requests.post(lowercase_ , json={"""text""": message_body} , headers=lowercase_ )
if response.status_code != 200:
lowercase = (
"""Request to slack returned an error """
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowercase_ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = [[1, 2, 4], [1, 2, 3, 4]]
lowercase = DisjunctiveConstraint(_lowerCAmelCase )
self.assertTrue(isinstance(dc.token_ids , _lowerCAmelCase ) )
with self.assertRaises(_lowerCAmelCase ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_lowerCAmelCase ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = [[1, 2], [1, 2, 3, 4]]
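        # [1, 2] is a strict prefix of [1, 2, 3, 4]; DisjunctiveConstraint
        # rejects one alternative being a subset of another, so this must raise.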
with self.assertRaises(_lowerCAmelCase ):
DisjunctiveConstraint(_lowerCAmelCase ) # fails here
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = [[1, 2, 3], [1, 2, 4]]
lowercase = DisjunctiveConstraint(_lowerCAmelCase )
lowercase , lowercase , lowercase = dc.update(1 )
lowercase = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase , lowercase , lowercase = dc.update(2 )
lowercase = stepped is True and completed is False and reset is False
self.assertTrue(_lowerCAmelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase , lowercase , lowercase = dc.update(3 )
lowercase = stepped is True and completed is True and reset is False
self.assertTrue(_lowerCAmelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _a ( self ) -> str:
'''simple docstring'''
lowercase = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowercase = DisjunctiveConstraint(_lowerCAmelCase )
lowercase , lowercase , lowercase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowercase , lowercase , lowercase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase , lowercase , lowercase = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowercase , lowercase , lowercase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowercase , lowercase , lowercase = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowercase , lowercase , lowercase = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowercase , lowercase , lowercase = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 714 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : List[str] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( lowercase_ : int ):
lowercase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowercase = [144, 192, 240]
lowercase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowercase = [96, 120, 144]
lowercase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowercase = [64, 80, 96]
lowercase = [16, 16, 24, 48, 64, 80, 320]
lowercase = 0.05
lowercase = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = 512
lowercase = 16
lowercase = 21
lowercase = """pascal-voc-id2label.json"""
else:
lowercase = 1000
lowercase = """imagenet-1k-id2label.json"""
lowercase = """huggingface/label-files"""
lowercase = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
lowercase = {int(lowercase_ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE ( lowercase_ : Any , lowercase_ : Any=False ):
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowercase = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowercase = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
lowercase = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
lowercase = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
lowercase = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
lowercase = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
lowercase = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
lowercase = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
lowercase = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
lowercase = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowercase = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowercase = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
lowercase = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
lowercase = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowercase = name.replace(F""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if F""".global_rep.{i}.bias""" in name:
lowercase = name.replace(F""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
lowercase = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
lowercase = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
lowercase = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
lowercase = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
lowercase = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
lowercase = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
lowercase = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
lowercase = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
lowercase = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
lowercase = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
lowercase = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
lowercase = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
lowercase = """mobilevit.""" + name
return name
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : str=False ):
if base_model:
lowercase = """"""
else:
lowercase = """mobilevit."""
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if key[:8] == "encoder.":
lowercase = key[8:]
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[0][6:] ) - 1
lowercase = int(key_split[3] )
lowercase = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowercase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowercase = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( ):
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[str]=False ):
lowercase = get_mobilevit_config(lowercase_ )
# load original state_dict
lowercase = torch.load(lowercase_ , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
lowercase = MobileViTForSemanticSegmentation(lowercase_ ).eval()
else:
lowercase = MobileViTForImageClassification(lowercase_ ).eval()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase = model(**lowercase_ )
lowercase = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowercase = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowercase = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowercase = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , lowercase_ , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
lowercase = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
lowercase = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
lowercase = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
lowercase = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
lowercase = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowercase_ , organization="""apple""" )
model.push_to_hub(lowercase_ , organization="""apple""" )
if __name__ == "__main__":
lowercase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 653 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ : Dict = logging.get_logger(__name__)
lowercase_ : Any = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''efficientnet'''
def __init__( self , _lowerCAmelCase = 3 , _lowerCAmelCase = 600 , _lowerCAmelCase = 2.0 , _lowerCAmelCase = 3.1 , _lowerCAmelCase = 8 , _lowerCAmelCase = [3, 3, 5, 3, 5, 5, 3] , _lowerCAmelCase = [32, 16, 24, 40, 80, 112, 192] , _lowerCAmelCase = [16, 24, 40, 80, 112, 192, 320] , _lowerCAmelCase = [] , _lowerCAmelCase = [1, 2, 2, 2, 1, 2, 1] , _lowerCAmelCase = [1, 2, 2, 3, 3, 4, 1] , _lowerCAmelCase = [1, 6, 6, 6, 6, 6, 6] , _lowerCAmelCase = 0.25 , _lowerCAmelCase = "swish" , _lowerCAmelCase = 2560 , _lowerCAmelCase = "mean" , _lowerCAmelCase = 0.02 , _lowerCAmelCase = 0.001 , _lowerCAmelCase = 0.99 , _lowerCAmelCase = 0.5 , _lowerCAmelCase = 0.2 , **_lowerCAmelCase , ) -> List[Any]:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = num_channels
lowercase = image_size
lowercase = width_coefficient
lowercase = depth_coefficient
lowercase = depth_divisor
lowercase = kernel_sizes
lowercase = in_channels
lowercase = out_channels
lowercase = depthwise_padding
lowercase = strides
lowercase = num_block_repeats
lowercase = expand_ratios
lowercase = squeeze_expansion_ratio
lowercase = hidden_act
lowercase = hidden_dim
lowercase = pooling_type
lowercase = initializer_range
lowercase = batch_norm_eps
lowercase = batch_norm_momentum
lowercase = dropout_rate
lowercase = drop_connect_rate
lowercase = sum(_lowerCAmelCase ) * 4
class __UpperCamelCase (_UpperCAmelCase ):
__A = version.parse('''1.11''' )
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a ( self ) -> float:
'''simple docstring'''
return 1E-5
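# Hedged usage sketch: assuming the first class above corresponds to EfficientNetConfig
# (its original name is hidden by the renaming), the defaults describe EfficientNet-B7
# (600x600 input, width coefficient 2.0, depth coefficient 3.1), so that:
#   config = EfficientNetConfig()
#   config.num_hidden_layers # == sum(num_block_repeats) * 4 == (1+2+2+3+3+4+1) * 4 == 64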
| 715 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=224 , _lowerCAmelCase=1000 , _lowerCAmelCase=[3, 3, 6, 4] , _lowerCAmelCase=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
lowercase = parent
lowercase = batch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = num_labels
lowercase = image_size
lowercase = layer_depths
lowercase = embed_dims
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def _a ( self ) -> int:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_lowerCAmelCase , layer_scale_init_value=1E-5 , )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = SwiftFormerModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.num_labels
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase = SwiftFormerForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
((lowercase) , (lowercase) , (lowercase)) = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__A = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__A = False
__A = False
__A = False
__A = False
__A = False
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = SwiftFormerModelTester(self )
lowercase = ConfigTester(
self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a ( self ) -> List[str]:
'''simple docstring'''
pass
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _a ( self ) -> int:
'''simple docstring'''
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(_lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _a ( self ) -> List[str]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _a ( self ) -> Any:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = SwiftFormerModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
lowercase = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
lowercase = outputs.hidden_states
lowercase = 8
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
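# e.g. with image_size=224: (image_size // 4) == 56, so the eight hidden states
# have spatial sizes 56, 56, 28, 28, 14, 14, 7, 7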
for i in range(len(_lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def _a ( self ) -> Dict:
'''simple docstring'''
def _config_zero_init(_lowerCAmelCase ):
lowercase = copy.deepcopy(_lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_lowerCAmelCase , _lowerCAmelCase , 1E-10 )
if isinstance(getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase ):
lowercase = _config_zero_init(getattr(_lowerCAmelCase , _lowerCAmelCase ) )
setattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return configs_no_init
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = _config_zero_init(_lowerCAmelCase )
for model_class in self.all_model_classes:
lowercase = model_class(config=_lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self ) -> Any:
'''simple docstring'''
pass
def prepare_img( ):
image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __UpperCamelCase (unittest.TestCase ):
@cached_property
def _a ( self ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(_lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**_lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
lowercase = torch.tensor([[-2.17_03E00, 2.11_07E00, -2.08_11E00]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
| 653 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase_ : Dict = logging.get_logger(__name__)
lowercase_ : Dict = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''marian'''
__A = ['''past_key_values''']
__A = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , _lowerCAmelCase=5_8101 , _lowerCAmelCase=None , _lowerCAmelCase=1024 , _lowerCAmelCase=12 , _lowerCAmelCase=4096 , _lowerCAmelCase=16 , _lowerCAmelCase=12 , _lowerCAmelCase=4096 , _lowerCAmelCase=16 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="gelu" , _lowerCAmelCase=1024 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.02 , _lowerCAmelCase=5_8100 , _lowerCAmelCase=False , _lowerCAmelCase=5_8100 , _lowerCAmelCase=0 , _lowerCAmelCase=0 , _lowerCAmelCase=True , **_lowerCAmelCase , ) -> Optional[int]:
'''simple docstring'''
lowercase = vocab_size
lowercase = decoder_vocab_size or vocab_size
lowercase = max_position_embeddings
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = encoder_layerdrop
lowercase = decoder_layerdrop
lowercase = use_cache
lowercase = encoder_layers
lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , forced_eos_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
class __UpperCamelCase (_UpperCAmelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowercase = {0: """batch"""}
lowercase = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowercase = {0: """batch""", 1: """decoder_sequence"""}
lowercase = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_lowerCAmelCase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowercase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowercase , lowercase = self.num_layers
for i in range(_lowerCAmelCase ):
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowercase = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase = super().outputs
else:
lowercase = super(_lowerCAmelCase , self ).outputs
if self.use_past:
lowercase , lowercase = self.num_layers
for i in range(_lowerCAmelCase ):
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
lowercase = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
lowercase = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Generate decoder inputs
lowercase = seq_length if not self.use_past else 1
lowercase = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
lowercase = dict(**_lowerCAmelCase , **_lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase , lowercase = common_inputs["""input_ids"""].shape
lowercase = common_inputs["""decoder_input_ids"""].shape[1]
lowercase , lowercase = self.num_attention_heads
lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase = decoder_seq_length + 3
lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
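# e.g. with the defaults above (16 heads, d_model=1024), each past key/value tensor is
# (batch, 16, encoder_seq_length, 64) on the encoder side and
# (batch, 16, decoder_seq_length + 3, 64) on the decoder side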
lowercase = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_lowerCAmelCase , _lowerCAmelCase )] , dim=1 )
lowercase = []
# Encoder and decoder may have different numbers of layers; fill the layers they share first, then pad the deeper side below
lowercase , lowercase = self.num_layers
lowercase = min(_lowerCAmelCase , _lowerCAmelCase )
lowercase = max(_lowerCAmelCase , _lowerCAmelCase ) - min_num_layers
lowercase = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_lowerCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
torch.zeros(_lowerCAmelCase ),
) )
# TODO: test this.
lowercase = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_lowerCAmelCase , _lowerCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) )
return common_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
lowercase = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowercase , lowercase = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowercase = seqlen + 2
lowercase , lowercase = self.num_layers
lowercase , lowercase = self.num_attention_heads
lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowercase = common_inputs["""attention_mask"""].dtype
lowercase = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 )
lowercase = [
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(_lowerCAmelCase )
]
return common_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
lowercase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If the axis is dynamic (-1), we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase = tokenizer.num_special_tokens_to_add(_lowerCAmelCase )
lowercase = compute_effective_axis_dimension(
_lowerCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowerCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowercase = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowercase = dict(tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
return common_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = -1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
else:
lowercase = self._generate_dummy_inputs_for_causal_lm(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
return common_inputs
def _a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
lowercase = super()._flatten_past_key_values_(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
else:
lowercase = super(_lowerCAmelCase , self )._flatten_past_key_values_(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
@property
def _a ( self ) -> float:
'''simple docstring'''
return 1E-4
| 716 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main( ):
parser = HfArgumentParser(TensorFlowBenchmarkArguments )
benchmark_args = parser.parse_args_into_dataclasses()[0]
benchmark = TensorFlowBenchmark(args=benchmark_args )
try:
benchmark_args = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
arg_error_msg = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
begin_error_msg = """ """.join(str(e ).split(""" """ )[:-1] )
full_error_msg = """"""
depreciated_args = eval(str(e ).split(""" """ )[-1] )
wrong_args = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(arg )
if len(wrong_args ) > 0:
full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
| 653 | 0 |
'''simple docstring'''
B64_CHARSET = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def base64_encode( data : bytes ):
# Make sure the supplied data is a bytes-like object
if not isinstance(data , bytes ):
msg = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(msg )
binary_stream = """""".join(bin(byte )[2:].zfill(8 ) for byte in data )
padding_needed = len(binary_stream ) % 6 != 0
if padding_needed:
# The padding that will be added later
padding = B"""=""" * ((6 - len(binary_stream ) % 6) // 2)
# Pad binary_stream with zeros so that its length becomes a multiple of 6.
binary_stream += "0" * (6 - len(binary_stream ) % 6)
else:
padding = B""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(binary_stream ) , 6 ) ).encode()
+ padding
)
def base64_decode( encoded_data : str ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
msg = (
"""argument should be a bytes-like object or ASCII string, """
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(msg )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(encoded_data , bytes ):
try:
encoded_data = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
padding = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
encoded_data = encoded_data[:-padding]
binary_stream = """""".join(
bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
binary_stream = """""".join(
bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
decoded_data = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(binary_stream ) , 8 )
]
return bytes(decoded_data )
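# Round-trip example, matching the output of Python's built-in base64 module:
#   base64_encode(b"Algorithms") # -> b"QWxnb3JpdGhtcw=="
#   base64_decode("QWxnb3JpdGhtcw==") # -> b"Algorithms"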
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3''' # silence TensorFlow's C++ logging in the printed report
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
| 653 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ : Tuple = logging.get_logger(__name__)
lowercase_ : Optional[int] = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase ):
__A = '''nat'''
__A = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , _lowerCAmelCase=4 , _lowerCAmelCase=3 , _lowerCAmelCase=64 , _lowerCAmelCase=[3, 4, 6, 5] , _lowerCAmelCase=[2, 4, 8, 16] , _lowerCAmelCase=7 , _lowerCAmelCase=3.0 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.1 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.02 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.0 , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase , ) -> Dict:
'''simple docstring'''
super().__init__(**_lowerCAmelCase )
lowercase = patch_size
lowercase = num_channels
lowercase = embed_dim
lowercase = depths
lowercase = len(_lowerCAmelCase )
lowercase = num_heads
lowercase = kernel_size
lowercase = mlp_ratio
lowercase = qkv_bias
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = drop_path_rate
lowercase = hidden_act
lowercase = layer_norm_eps
lowercase = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase = int(embed_dim * 2 ** (len(_lowerCAmelCase ) - 1) )
lowercase = layer_scale_init_value
lowercase = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 , len(_lowerCAmelCase ) + 1 )]
lowercase , lowercase = get_aligned_output_features_output_indices(
out_features=_lowerCAmelCase , out_indices=_lowerCAmelCase , stage_names=self.stage_names )
| 718 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : int = {'''vocab_file''': '''spm_char.model'''}
lowercase_ : int = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
lowercase_ : Optional[Any] = {
'''microsoft/speecht5_asr''': 1024,
'''microsoft/speecht5_tts''': 1024,
'''microsoft/speecht5_vc''': 1024,
}
class __UpperCamelCase (_UpperCAmelCase ):
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = ['''input_ids''', '''attention_mask''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
@property
def _a ( self ) -> List[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self ) -> str:
'''simple docstring'''
lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = self.sp_model.IdToPiece(_lowerCAmelCase )
return token
def _a ( self , _lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = []
lowercase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCAmelCase ) + token
lowercase = []
else:
current_sub_tokens.append(_lowerCAmelCase )
out_string += self.sp_model.decode(_lowerCAmelCase )
return out_string.strip()
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
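# e.g. a single sequence X becomes `X </s>`; a (not normally expected) pair becomes `A B </s>`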
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
lowercase = [1]
if token_ids_a is None:
return ([0] * len(_lowerCAmelCase )) + suffix_ones
return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
| 653 | 0 |
'''simple docstring'''
from collections import namedtuple
lowercase_ : List[str] = namedtuple('''from_to''', '''from_ to''')
lowercase_ : List[Any] = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.001, 1000),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.00454, 264.172),
'''cubicyard''': from_to(0.76455, 1.30795),
'''cubicfoot''': from_to(0.028, 35.3147),
'''cup''': from_to(0.000236588, 4226.75),
}
def volume_conversion( value : float , from_type : str , to_type : str ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'from_type' value: {from_type!r}. Supported values are:\n"""
+ """, """.join(METRIC_CONVERSION ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ """, """.join(METRIC_CONVERSION ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
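# Worked example from the table above: 4 cubic metres to litres
#   volume_conversion(4, "cubicmeter", "litre") # -> 4 * 1 * 1000 == 4000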
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
'''simple docstring'''
def solution( ):
constant = []
i = 1
while len(constant ) < 1E6:
constant.append(str(i ) )
i += 1
constant = """""".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
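# This is Project Euler problem 40: the product of the digits of Champernowne's
# constant 0.123456789101112... at positions 1, 10, 100, 1000, 10000, 100000 and 1000000.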
if __name__ == "__main__":
print(solution())
| 653 | 0 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : Optional[int] = {'''vocab_file''': '''spiece.model'''}
lowercase_ : Optional[Any] = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class __UpperCamelCase (_UpperCAmelCase ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="<s>" , _lowerCAmelCase="</s>" , _lowerCAmelCase="<unk>" , _lowerCAmelCase="<sep>" , _lowerCAmelCase="<pad>" , _lowerCAmelCase="<cls>" , _lowerCAmelCase="<mask>" , _lowerCAmelCase=["<eop>", "<eod>"] , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> None:
'''simple docstring'''
lowercase = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else mask_token
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCAmelCase , remove_space=_lowerCAmelCase , keep_accents=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
lowercase = 3
lowercase = do_lower_case
lowercase = remove_space
lowercase = keep_accents
lowercase = vocab_file
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCAmelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
lowercase = jieba
lowercase = str.maketrans(""" \n""" , """\u2582\u2583""" )
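# maps " " -> "\u2582" and "\n" -> "\u2583" before tokenization; the _decode
# override at the bottom of this class reverses the substitution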
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _a ( self ) -> Dict:
'''simple docstring'''
return len(self.sp_model )
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
'''simple docstring'''
lowercase = self.__dict__.copy()
lowercase = None
return state
def __setstate__( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self , _lowerCAmelCase ) -> Tuple:
'''simple docstring'''
if self.remove_space:
lowercase = """ """.join(inputs.strip().split() )
else:
lowercase = inputs
lowercase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
lowercase = unicodedata.normalize("""NFKD""" , _lowerCAmelCase )
lowercase = """""".join([c for c in outputs if not unicodedata.combining(_lowerCAmelCase )] )
if self.do_lower_case:
lowercase = outputs.lower()
return outputs
def _a ( self , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = self.preprocess_text(_lowerCAmelCase )
lowercase = self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
lowercase = []
for piece in pieces:
if len(_lowerCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowercase = cur_pieces[1:]
else:
lowercase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_lowerCAmelCase )
else:
new_pieces.append(_lowerCAmelCase )
return new_pieces
def _a ( self , _lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
return self.sp_model.PieceToId(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> Any:
'''simple docstring'''
return self.sp_model.IdToPiece(_lowerCAmelCase )
def _a ( self , _lowerCAmelCase ) -> str:
'''simple docstring'''
lowercase = """""".join(_lowerCAmelCase ).replace(_lowerCAmelCase , """ """ ).strip()
return out_string
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
if token_ids_a is not None:
return ([0] * len(_lowerCAmelCase )) + [1] + ([0] * len(_lowerCAmelCase )) + [1, 1]
return ([0] * len(_lowerCAmelCase )) + [1, 1]
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> List[int]:
'''simple docstring'''
lowercase = [self.sep_token_id]
lowercase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _a ( self , _lowerCAmelCase , _lowerCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase = os.path.join(
_lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCAmelCase , """wb""" ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (out_vocab_file,)
def _a ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase = super()._decode(*_lowerCAmelCase , **_lowerCAmelCase )
lowercase = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text
| 720 |
'''simple docstring'''
import os
def solution( ):
file_path = os.path.join(os.path.dirname(__file__ ) , """num.txt""" )
with open(file_path ) as file_hand:
return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 653 | 0 |
'''simple docstring'''
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __UpperCamelCase (_UpperCAmelCase ):
__A = '''facebook/bart-large-mnli'''
__A = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
__A = '''text_classifier'''
__A = AutoTokenizer
__A = AutoModelForSequenceClassification
__A = ['''text''', ['''text''']]
__A = ['''text''']
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setup()
lowercase = self.model.config
lowercase = -1
for idx, label in config.id2label.items():
if label.lower().startswith("""entail""" ):
lowercase = int(_lowerCAmelCase )
if self.entailment_id == -1:
raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
def _a ( self , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase = labels
return self.pre_processor(
[text] * len(_lowerCAmelCase ) , [F"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
def _a ( self , _lowerCAmelCase ) -> Tuple:
'''simple docstring'''
lowercase = outputs.logits
lowercase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
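# Hedged usage sketch via the Transformers agents API (the exact task string is
# an assumption inferred from the tool name above):
#   from transformers import load_tool
#   classifier = load_tool("text-classification")
#   classifier("This is a super nice API!", labels=["positive", "negative"])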
| 721 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __UpperCamelCase (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
__A = StableDiffusionPanoramaPipeline
__A = TEXT_TO_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_BATCH_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
__A = TEXT_TO_IMAGE_IMAGE_PARAMS
def _a ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowercase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase = DDIMScheduler()
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase = CLIPTextModel(_lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a ( self , _lowerCAmelCase , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
# Setting height and width to None to prevent OOMs on CPU.
"""height""": None,
"""width""": None,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> int:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self ) -> str:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 )
def _a ( self ) -> List[Any]:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = """french fries"""
lowercase = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase , view_batch_size=2 )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Dict:
'''simple docstring'''
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = PNDMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , skip_prk_steps=_lowerCAmelCase )
lowercase = StableDiffusionPanoramaPipeline(**_lowerCAmelCase )
lowercase = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowercase = self.get_dummy_inputs(_lowerCAmelCase )
lowercase = sd_pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __UpperCamelCase (unittest.TestCase ):
def _a ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , _lowerCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.manual_seed(_lowerCAmelCase )
lowercase = {
"""prompt""": """a photo of the dolomites""",
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _a ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
0.3696_8392,
0.2702_5372,
0.3244_6766,
0.2837_9387,
0.3636_3274,
0.3073_3347,
0.2710_0027,
0.2705_4125,
0.2553_6096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def _a ( self ) -> str:
'''simple docstring'''
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-base""" , safety_checker=_lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def _a ( self ) -> Any:
'''simple docstring'''
lowercase = 0
def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
lowercase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
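# (1, 4, 64, 256): four latent channels at 1/8 of the 512x2048 panorama resolution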
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1868_1869,
0.3390_7816,
0.536_1276,
0.1443_2865,
-0.0285_6611,
-0.7394_1123,
0.2339_7987,
0.4732_2682,
-0.3782_3164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
lowercase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase = latents[0, -3:, -3:, -1]
lowercase = np.array(
[
0.1853_9645,
0.3398_7248,
0.537_8559,
0.1443_7142,
-0.0245_5261,
-0.733_8317,
0.2399_0755,
0.4735_6272,
-0.378_6505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
lowercase = False
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _a ( self ) -> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase = """stabilityai/stable-diffusion-2-base"""
lowercase = DDIMScheduler.from_pretrained(_lowerCAmelCase , subfolder="""scheduler""" )
lowercase = StableDiffusionPanoramaPipeline.from_pretrained(_lowerCAmelCase , scheduler=_lowerCAmelCase , safety_checker=_lowerCAmelCase )
lowercase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase = self.get_inputs()
lowercase = pipe(**_lowerCAmelCase )
lowercase = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 653 | 0 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class A ( UpperCAmelCase ):
a_ = CustomTokenizer
pass
| 654 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple:
self.maxDiff = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(__a ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(__a , __a )
if os.path.isfile(__a ) and ".py" in item_path:
with self.subTest(
tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(__a , __a ) , __a , __a , __a )
__UpperCAmelCase = '''\n'''.join(__a )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(__a , '''''' )
self.assertEqual(__a , '''''' )
def snake_case__ ( self : Optional[Any] ) -> str:
self.one_complete_example('''complete_nlp_example.py''' , __a )
self.one_complete_example('''complete_nlp_example.py''' , __a )
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
def snake_case__ ( cls : Tuple ) -> str:
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
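# every test below effectively runs:
#   accelerate launch --config_file <tmpdir>/default_config.yml <script> <args>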
@classmethod
def snake_case__ ( cls : Dict ) -> int:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
        __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=True )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
else:
self.assertIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
            __UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=True )
__UpperCAmelCase = re.findall('''({.+})''' , __a )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(__a )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , '''tracking''' ) ) )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 654 | 1 |
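# A minimal, self-contained sketch of the mocking convention referenced at the
# top of the previous sample: the tests patch each example script's
# `get_dataloaders` symbol so no real dataset is downloaded. The class and the
# return values below are illustrative placeholders, not the real scripts.
from unittest import mock

class _FakeExampleScript:
    @staticmethod
    def get_dataloaders(*args, **kwargs):
        raise RuntimeError("would download real data")

def _mocked_dataloaders(*args, **kwargs):
    return [], []  # stand-in (train_dataloader, eval_dataloader)

with mock.patch.object(_FakeExampleScript, "get_dataloaders", _mocked_dataloaders):
    assert _FakeExampleScript.get_dataloaders() == ([], [])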
'''simple docstring'''
import heapq
import sys
import numpy as np
__lowerCAmelCase : Any = tuple[int, int]
class A :
def __init__( self : Optional[int] ) -> int:
__UpperCAmelCase = []
__UpperCAmelCase = set()
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case__ ( self : Dict ) -> Optional[int]:
return len(self.elements ) == 0
    def snake_case__ ( self : Optional[int] , item : Optional[Any] , priority : Dict ) -> Optional[Any]:
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            ((pri) , (x)) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                ((pri) , (x)) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )
    def snake_case__ ( self : int , item : Any ) -> int:
        if item in self.set:
            self.set.remove(item )
            temp = []
            ((pro) , (x)) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                ((pro) , (x)) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )
def snake_case__ ( self : List[str] ) -> Dict:
return self.elements[0][1]
def snake_case__ ( self : Any ) -> List[str]:
        ((priority) , (item)) = heapq.heappop(self.elements )
        self.set.remove(item )
return (priority, item)
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# euclidean distance
__UpperCAmelCase = np.array(UpperCamelCase__ )
__UpperCAmelCase = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
"""simple docstring"""
__UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = np.chararray((n, n) )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
__UpperCAmelCase = '''*'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (j, (n - 1) - i) in blocks:
__UpperCAmelCase = '''#'''
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[goal]
while x != start:
((__UpperCAmelCase) , (__UpperCAmelCase)) = x
# print(x)
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[x]
__UpperCAmelCase = '''-'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__UpperCAmelCase = back_pointer[goal]
while x != start:
print(UpperCamelCase__ , end=''' ''' )
__UpperCAmelCase = back_pointer[x]
print(UpperCamelCase__ )
sys.exit()
def lowerCAmelCase ( UpperCamelCase__ : TPos ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ):
"""simple docstring"""
for itera in range(UpperCamelCase__ ):
open_list[itera].remove_element(UpperCamelCase__ )
# print("s", s)
# print("j", j)
    ((x) , (y)) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCamelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCamelCase__ )
__UpperCAmelCase = -1
__UpperCAmelCase = float('''inf''' )
if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
__UpperCAmelCase = g_function[s] + 1
__UpperCAmelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCamelCase__ ):
if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
open_list[j].put(
UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
__lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__lowerCAmelCase : List[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__lowerCAmelCase : Dict = make_common_ground()
__lowerCAmelCase : int = blocks_blk
# hyper parameters
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : Union[str, Any] = 20
__lowerCAmelCase : Any = 3 # one consistent and two other inconsistent
# start and end destination
__lowerCAmelCase : Optional[Any] = (0, 0)
__lowerCAmelCase : Any = (n - 1, n - 1)
__lowerCAmelCase : Optional[int] = 1
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = {start: 0, goal: float('''inf''' )}
__UpperCAmelCase = {start: -1, goal: -1}
__UpperCAmelCase = []
__UpperCAmelCase = set()
for i in range(UpperCamelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase = []
__UpperCAmelCase = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , UpperCamelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_inad.append(UpperCamelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase = open_list[0].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_anchor.append(UpperCamelCase__ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCamelCase__ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 |
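# A minimal sketch (illustrative names, not the original API) of the priority
# rule driving the multi-heuristic A* above: every queue i orders states by
# key(s, i) = g(s) + W1 * h_i(s, goal), and an inadmissible queue i is only
# expanded while its best key stays within a factor W2 of the anchor queue,
# which uses the consistent heuristic.
def sketch_key(g_value, h_value, w1=1.0):
    # priority of a state: cost so far plus weighted heuristic estimate
    return g_value + w1 * h_value

def sketch_should_expand_inadmissible(min_key_i, min_key_anchor, w2=1.0):
    # queue i may only expand while bounded by the anchor queue
    return min_key_i <= w2 * min_key_anchor

def sketch_manhattan(p, goal):
    # the admissible grid heuristic used above
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])

assert sketch_manhattan((0, 0), (3, 4)) == 7
assert sketch_key(2.0, sketch_manhattan((0, 0), (3, 4))) == 9.0
assert sketch_should_expand_inadmissible(9.0, 9.0, w2=1.0)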
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = ""
__lowerCAmelCase : Union[str, Any] = ""
__lowerCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase = random_chars(3_2 )
__UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""{file_root}.jpg""" , image , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
__UpperCAmelCase = []
for anno in new_annos[index]:
__UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase__ )
        with open(f"""{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ):
__UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(UpperCamelCase__ ) as in_file:
__UpperCAmelCase = in_file.readlines()
__UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" )
__UpperCAmelCase = []
for obj_list in obj_lists:
__UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase__ )
labels.append(UpperCamelCase__ )
return img_paths, labels
def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = []
for idx in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = []
__UpperCAmelCase = img_list[idx]
path_list.append(UpperCamelCase__ )
__UpperCAmelCase = anno_list[idx]
__UpperCAmelCase = cva.imread(UpperCamelCase__ )
if flip_type == 1:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase__ )
new_imgs_list.append(UpperCamelCase__ )
return new_imgs_list, new_annos_lists, path_list
def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
__UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 654 | 1 |
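# A minimal sketch (with illustrative names) of the annotation update performed
# by `update_image_and_anno` above: YOLO boxes are (label, x_center, y_center,
# width, height) with coordinates normalised to [0, 1], so a horizontal flip
# maps x_center -> 1 - x_center and a vertical flip maps y_center -> 1 - y_center.
def flip_yolo_box(box, flip_type):
    label, x, y, w, h = box
    if flip_type == 1:  # horizontal flip (mirror left-right)
        return [label, 1 - x, y, w, h]
    return [label, x, 1 - y, w, h]  # vertical flip (mirror top-bottom)

assert flip_yolo_box([0, 0.25, 0.6, 0.1, 0.2], flip_type=1) == [0, 0.75, 0.6, 0.1, 0.2]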
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__lowerCAmelCase : Any = ["gpt2"]
__lowerCAmelCase : Union[str, Any] = "gpt2"
if is_tf_available():
class A ( tf.Module ):
def __init__( self : Any , __a : Any ) -> Optional[int]:
super().__init__()
__UpperCAmelCase = tokenizer
__UpperCAmelCase = AutoConfig.from_pretrained(__a )
__UpperCAmelCase = TFGPTaLMHeadModel.from_config(__a )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def snake_case__ ( self : int , __a : Optional[Any] ) -> int:
__UpperCAmelCase = self.tokenizer(__a )
__UpperCAmelCase = tokenized['''input_ids'''].to_tensor()
__UpperCAmelCase = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__UpperCAmelCase = self.model(input_ids=__a , attention_mask=__a )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class A ( unittest.TestCase ):
def snake_case__ ( self : int ) -> Dict:
super().setUp()
        __UpperCAmelCase = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        __UpperCAmelCase = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__UpperCAmelCase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__UpperCAmelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__UpperCAmelCase = tokenizer([test_inputs] , return_tensors='''tf''' )
__UpperCAmelCase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__UpperCAmelCase = python_outputs[key].numpy()
__UpperCAmelCase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__a , tf.intaa ) == tf_outputs_values ) )
@slow
def snake_case__ ( self : int ) -> List[Any]:
for tf_tokenizer in self.tf_tokenizers:
            __UpperCAmelCase = tf.function(tf_tokenizer )
for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def snake_case__ ( self : Tuple ) -> Optional[Any]:
for tf_tokenizer in self.tf_tokenizers:
__UpperCAmelCase = ModelToSave(tokenizer=__a )
__UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__UpperCAmelCase = model.serving(__a ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__UpperCAmelCase = Path(__a ) / '''saved.model'''
tf.saved_model.save(__a , __a , signatures={'''serving_default''': model.serving} )
__UpperCAmelCase = tf.saved_model.load(__a )
__UpperCAmelCase = loaded_model.signatures['''serving_default'''](__a )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def snake_case__ ( self : Optional[int] ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
__UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__UpperCAmelCase = tf_tokenizer(__a ) # Build model with some sample inputs
__UpperCAmelCase = tf_tokenizer.get_config()
__UpperCAmelCase = TFGPTaTokenizer.from_config(__a )
__UpperCAmelCase = model_from_config(__a )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def snake_case__ ( self : Dict ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__UpperCAmelCase = 1_2_3_1_2_3
for max_length in [3, 5, 1_0_2_4]:
__UpperCAmelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__UpperCAmelCase = tf_tokenizer(__a , max_length=__a )
__UpperCAmelCase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 654 |
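# A minimal sketch of the padding step the serving module above relies on: the
# in-graph tokenizer returns a RaggedTensor, `.to_tensor()` zero-pads it to a
# rectangle, and the attention mask is recovered by comparing against the pad
# value (assumed to be 0 here, as in the test).
import tensorflow as tf

ragged_ids = tf.ragged.constant([[10, 11, 12], [13, 14]])
dense_ids = ragged_ids.to_tensor()                 # [[10, 11, 12], [13, 14, 0]]
attention_mask = tf.cast(dense_ids > 0, tf.int32)  # [[1, 1, 1], [1, 1, 0]]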
'''simple docstring'''
from pathlib import Path
import fire
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = Path(UpperCamelCase__ )
__UpperCAmelCase = Path(UpperCamelCase__ )
dest_dir.mkdir(exist_ok=UpperCamelCase__ )
for path in src_dir.iterdir():
__UpperCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
__UpperCAmelCase = dest_dir.joinpath(path.name )
print(UpperCamelCase__ )
dest_path.open('''w''' ).write('''\n'''.join(UpperCamelCase__ ) )
if __name__ == "__main__":
fire.Fire(minify)
| 654 | 1 |
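# Usage sketch for the script above (paths and the line count are
# illustrative): `fire.Fire(minify)` exposes the function as a CLI, so
#   python minify.py --src_dir data/raw --dest_dir data/mini --n 100
# is equivalent to calling minify("data/raw", "data/mini", 100) directly;
# each output file keeps only the first 100 lines of its source file.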
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase )
class A ( UpperCAmelCase ):
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : str ) -> int:
super().__init__(*__a , **__a )
requires_backends(self , '''decord''' )
self.check_model_type(__a )
def snake_case__ ( self : Tuple , __a : Tuple=None , __a : Optional[Any]=None , __a : Optional[int]=None ) -> Any:
__UpperCAmelCase = {}
if frame_sampling_rate is not None:
__UpperCAmelCase = frame_sampling_rate
if num_frames is not None:
__UpperCAmelCase = num_frames
__UpperCAmelCase = {}
if top_k is not None:
__UpperCAmelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Dict , __a : Union[str, List[str]] , **__a : Dict ) -> Optional[Any]:
return super().__call__(__a , **__a )
def snake_case__ ( self : Union[str, Any] , __a : Tuple , __a : List[Any]=None , __a : Optional[int]=1 ) -> List[str]:
if num_frames is None:
__UpperCAmelCase = self.model.config.num_frames
if video.startswith('''http://''' ) or video.startswith('''https://''' ):
__UpperCAmelCase = BytesIO(requests.get(__a ).content )
__UpperCAmelCase = VideoReader(__a )
videoreader.seek(0 )
__UpperCAmelCase = 0
__UpperCAmelCase = num_frames * frame_sampling_rate - 1
__UpperCAmelCase = np.linspace(__a , __a , num=__a , dtype=np.intaa )
__UpperCAmelCase = videoreader.get_batch(__a ).asnumpy()
__UpperCAmelCase = list(__a )
__UpperCAmelCase = self.image_processor(__a , return_tensors=self.framework )
return model_inputs
def snake_case__ ( self : Optional[Any] , __a : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase = self.model(**__a )
return model_outputs
def snake_case__ ( self : Optional[int] , __a : List[Any] , __a : str=5 ) -> int:
if top_k > self.model.config.num_labels:
__UpperCAmelCase = self.model.config.num_labels
if self.framework == "pt":
__UpperCAmelCase = model_outputs.logits.softmax(-1 )[0]
__UpperCAmelCase , __UpperCAmelCase = probs.topk(__a )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__UpperCAmelCase = scores.tolist()
__UpperCAmelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__a , __a )]
| 654 |
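# A minimal sketch of the frame-index selection done in `preprocess` above:
# `num_frames` indices are spread evenly over a clip window of
# num_frames * frame_sampling_rate frames, starting at frame 0.
import numpy as np

def sample_frame_indices(num_frames, frame_sampling_rate):
    end_frame = num_frames * frame_sampling_rate - 1
    return np.linspace(0, end_frame, num=num_frames, dtype=np.int64)

assert sample_frame_indices(4, 2).tolist() == [0, 2, 4, 7]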
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase__ )
if number < 1:
__UpperCAmelCase = f"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCamelCase__ )
__UpperCAmelCase = 1
for i in range(1 , UpperCamelCase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 1 |
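# The loop above is the standard Catalan recurrence
#   C(i) = C(i - 1) * (4 * i - 2) / (i + 1),
# so the function returns C(number - 1); because every Catalan number is an
# integer, the floor division is exact at each step. A quick check against the
# first Catalan numbers 1, 1, 2, 5, 14:
def catalan_sketch(n):
    value = 1
    for i in range(1, n):
        value = value * (4 * i - 2) // (i + 1)
    return value

assert [catalan_sketch(k) for k in range(1, 6)] == [1, 1, 2, 5, 14]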
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
__lowerCAmelCase : Union[str, Any] = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class A ( UpperCAmelCase ):
a_ = '''audio-spectrogram-transformer'''
def __init__( self : str , __a : Optional[Any]=7_6_8 , __a : List[Any]=1_2 , __a : Optional[Any]=1_2 , __a : int=3_0_7_2 , __a : Optional[Any]="gelu" , __a : Optional[Any]=0.0 , __a : int=0.0 , __a : int=0.0_2 , __a : Optional[Any]=1e-12 , __a : Optional[Any]=1_6 , __a : Optional[int]=True , __a : int=1_0 , __a : Any=1_0 , __a : int=1_0_2_4 , __a : Tuple=1_2_8 , **__a : str , ) -> str:
super().__init__(**__a )
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = patch_size
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = frequency_stride
__UpperCAmelCase = time_stride
__UpperCAmelCase = max_length
__UpperCAmelCase = num_mel_bins
| 654 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class A ( nn.Module ):
def __init__( self : Optional[Any] ) -> int:
super().__init__()
__UpperCAmelCase = nn.Linear(3 , 4 )
__UpperCAmelCase = nn.BatchNormad(4 )
__UpperCAmelCase = nn.Linear(4 , 5 )
def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self : Any ) -> int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__a : Optional[int] ):
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : Any ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(__a ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self : Any ) -> List[Any]:
__UpperCAmelCase = torch.cuda.memory_allocated()
__UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __a )
__UpperCAmelCase = release_memory(__a )
self.assertEqual(torch.cuda.memory_allocated() , __a )
| 654 | 1 |
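# A minimal, dependency-free sketch of the pattern exercised by the tests
# above: start from a requested batch size and halve it on an out-of-memory
# error until the wrapped function succeeds or the size reaches zero.
# `accelerate.utils.find_executable_batch_size` implements a more robust
# version of this idea; everything below is illustrative.
import functools

def halve_on_oom(starting_batch_size=128):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            batch_size = starting_batch_size
            while batch_size > 0:
                try:
                    return fn(batch_size, *args, **kwargs)
                except RuntimeError as err:
                    if "out of memory" not in str(err):
                        raise
                    batch_size //= 2  # retry with half the batch
            raise RuntimeError("No executable batch size found, reached zero.")
        return wrapper
    return decorator

@halve_on_oom(starting_batch_size=16)
def _train(batch_size):
    if batch_size > 8:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

assert _train() == 8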
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
class A ( UpperCAmelCase ):
def __init__( self : Any , *__a : Any , **__a : Union[str, Any] ) -> None:
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , __a , )
super().__init__(*__a , **__a )
| 654 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = u
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = temp * (u - i)
return temp
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = int(input('''enter the numbers of values: ''' ) )
__UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
__UpperCAmelCase = 0
print('''enter the values of parameters in a list: ''' )
__UpperCAmelCase = list(map(UpperCamelCase__ , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = float(input() )
__UpperCAmelCase = int(input('''enter the value to interpolate: ''' ) )
__UpperCAmelCase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
__UpperCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
__UpperCAmelCase = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 654 | 1 |
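# A compact, self-contained sketch of the same method with clearer names:
# build the forward-difference table, then evaluate
#   f(x) ~= sum_k u * (u - 1) * ... * (u - k + 1) / k! * delta^k y_0,
# with u = (x - x_0) / h for equally spaced points.
import math

def newton_forward(xs, ys, value):
    n = len(xs)
    table = [list(ys)]  # row k holds the k-th forward differences
    for k in range(1, n):
        prev = table[-1]
        table.append([prev[j + 1] - prev[j] for j in range(n - k)])
    u = (value - xs[0]) / (xs[1] - xs[0])
    result, u_term = float(table[0][0]), 1.0
    for k in range(1, n):
        u_term *= u - (k - 1)
        result += u_term * table[k][0] / math.factorial(k)
    return result

# Interpolating y = x**2 on equally spaced points reproduces it exactly:
assert abs(newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5) - 2.25) < 1e-9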
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
# General docstring
__lowerCAmelCase : Union[str, Any] = "ResNetConfig"
# Base docstring
__lowerCAmelCase : str = "microsoft/resnet-50"
__lowerCAmelCase : Dict = [1, 2_048, 7, 7]
# Image classification docstring
__lowerCAmelCase : str = "microsoft/resnet-50"
__lowerCAmelCase : Optional[Any] = "tiger cat"
__lowerCAmelCase : Any = [
"microsoft/resnet-50",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class A ( nn.Module ):
def __init__( self : List[Any] , __a : int , __a : int , __a : int = 3 , __a : int = 1 , __a : str = "relu" ) -> Tuple:
super().__init__()
__UpperCAmelCase = nn.Convad(
__a , __a , kernel_size=__a , stride=__a , padding=kernel_size // 2 , bias=__a )
__UpperCAmelCase = nn.BatchNormad(__a )
__UpperCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def snake_case__ ( self : Any , __a : Tensor ) -> Tensor:
__UpperCAmelCase = self.convolution(__a )
__UpperCAmelCase = self.normalization(__a )
__UpperCAmelCase = self.activation(__a )
return hidden_state
class A ( nn.Module ):
def __init__( self : List[Any] , __a : ResNetConfig ) -> Tuple:
super().__init__()
__UpperCAmelCase = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
__UpperCAmelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
__UpperCAmelCase = config.num_channels
def snake_case__ ( self : Optional[Any] , __a : Tensor ) -> Tensor:
__UpperCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
__UpperCAmelCase = self.embedder(__a )
__UpperCAmelCase = self.pooler(__a )
return embedding
class A ( nn.Module ):
def __init__( self : Any , __a : int , __a : int , __a : int = 2 ) -> int:
super().__init__()
__UpperCAmelCase = nn.Convad(__a , __a , kernel_size=1 , stride=__a , bias=__a )
__UpperCAmelCase = nn.BatchNormad(__a )
def snake_case__ ( self : int , __a : Tensor ) -> Tensor:
__UpperCAmelCase = self.convolution(__a )
__UpperCAmelCase = self.normalization(__a )
return hidden_state
class A ( nn.Module ):
def __init__( self : Optional[Any] , __a : int , __a : int , __a : int = 1 , __a : str = "relu" ) -> Any:
super().__init__()
__UpperCAmelCase = in_channels != out_channels or stride != 1
__UpperCAmelCase = (
ResNetShortCut(__a , __a , stride=__a ) if should_apply_shortcut else nn.Identity()
)
__UpperCAmelCase = nn.Sequential(
ResNetConvLayer(__a , __a , stride=__a ) , ResNetConvLayer(__a , __a , activation=__a ) , )
__UpperCAmelCase = ACTaFN[activation]
def snake_case__ ( self : Optional[int] , __a : List[Any] ) -> int:
__UpperCAmelCase = hidden_state
__UpperCAmelCase = self.layer(__a )
__UpperCAmelCase = self.shortcut(__a )
hidden_state += residual
__UpperCAmelCase = self.activation(__a )
return hidden_state
class A ( nn.Module ):
def __init__( self : Optional[int] , __a : int , __a : int , __a : int = 1 , __a : str = "relu" , __a : int = 4 ) -> Optional[Any]:
super().__init__()
__UpperCAmelCase = in_channels != out_channels or stride != 1
__UpperCAmelCase = out_channels // reduction
__UpperCAmelCase = (
ResNetShortCut(__a , __a , stride=__a ) if should_apply_shortcut else nn.Identity()
)
__UpperCAmelCase = nn.Sequential(
ResNetConvLayer(__a , __a , kernel_size=1 ) , ResNetConvLayer(__a , __a , stride=__a ) , ResNetConvLayer(__a , __a , kernel_size=1 , activation=__a ) , )
__UpperCAmelCase = ACTaFN[activation]
def snake_case__ ( self : Any , __a : Optional[int] ) -> int:
__UpperCAmelCase = hidden_state
__UpperCAmelCase = self.layer(__a )
__UpperCAmelCase = self.shortcut(__a )
hidden_state += residual
__UpperCAmelCase = self.activation(__a )
return hidden_state
class A ( nn.Module ):
def __init__( self : int , __a : ResNetConfig , __a : int , __a : int , __a : int = 2 , __a : int = 2 , ) -> List[Any]:
super().__init__()
__UpperCAmelCase = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
__UpperCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__a , __a , stride=__a , activation=config.hidden_act ) , *[layer(__a , __a , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def snake_case__ ( self : str , __a : Tensor ) -> Tensor:
__UpperCAmelCase = input
for layer in self.layers:
__UpperCAmelCase = layer(__a )
return hidden_state
class A ( nn.Module ):
def __init__( self : Optional[Any] , __a : ResNetConfig ) -> Any:
super().__init__()
__UpperCAmelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__a , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__UpperCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__a , config.depths[1:] ):
self.stages.append(ResNetStage(__a , __a , __a , depth=__a ) )
def snake_case__ ( self : Dict , __a : Tensor , __a : bool = False , __a : bool = True ) -> BaseModelOutputWithNoAttention:
__UpperCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__UpperCAmelCase = hidden_states + (hidden_state,)
__UpperCAmelCase = stage_module(__a )
if output_hidden_states:
__UpperCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=__a , hidden_states=__a , )
class A ( UpperCAmelCase ):
a_ = ResNetConfig
a_ = '''resnet'''
a_ = '''pixel_values'''
a_ = True
def snake_case__ ( self : List[Any] , __a : List[Any] ) -> List[str]:
if isinstance(__a , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(__a , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def snake_case__ ( self : List[Any] , __a : Optional[int] , __a : Optional[Any]=False ) -> Dict:
if isinstance(__a , __a ):
__UpperCAmelCase = value
__lowerCAmelCase : int = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__lowerCAmelCase : List[Any] = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , UpperCAmelCase , )
class A ( UpperCAmelCase ):
def __init__( self : List[str] , __a : Any ) -> Dict:
super().__init__(__a )
__UpperCAmelCase = config
__UpperCAmelCase = ResNetEmbeddings(__a )
__UpperCAmelCase = ResNetEncoder(__a )
__UpperCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case__ ( self : List[str] , __a : Tensor , __a : Optional[bool] = None , __a : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
__UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase = self.embedder(__a )
__UpperCAmelCase = self.encoder(
__a , output_hidden_states=__a , return_dict=__a )
__UpperCAmelCase = encoder_outputs[0]
__UpperCAmelCase = self.pooler(__a )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__a , pooler_output=__a , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , UpperCAmelCase , )
class A ( UpperCAmelCase ):
def __init__( self : Optional[int] , __a : Tuple ) -> Union[str, Any]:
super().__init__(__a )
__UpperCAmelCase = config.num_labels
__UpperCAmelCase = ResNetModel(__a )
# classification head
__UpperCAmelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case__ ( self : Union[str, Any] , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.LongTensor] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
__UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase = self.resnet(__a , output_hidden_states=__a , return_dict=__a )
__UpperCAmelCase = outputs.pooler_output if return_dict else outputs[1]
__UpperCAmelCase = self.classifier(__a )
__UpperCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__UpperCAmelCase = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__UpperCAmelCase = '''single_label_classification'''
else:
__UpperCAmelCase = '''multi_label_classification'''
if self.config.problem_type == "regression":
__UpperCAmelCase = MSELoss()
if self.num_labels == 1:
__UpperCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__UpperCAmelCase = loss_fct(__a , __a )
elif self.config.problem_type == "single_label_classification":
__UpperCAmelCase = CrossEntropyLoss()
__UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__UpperCAmelCase = BCEWithLogitsLoss()
__UpperCAmelCase = loss_fct(__a , __a )
if not return_dict:
__UpperCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__a , logits=__a , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , UpperCAmelCase , )
class A ( UpperCAmelCase , UpperCAmelCase ):
def __init__( self : str , __a : str ) -> Optional[Any]:
super().__init__(__a )
super()._init_backbone(__a )
__UpperCAmelCase = [config.embedding_size] + config.hidden_sizes
__UpperCAmelCase = ResNetEmbeddings(__a )
__UpperCAmelCase = ResNetEncoder(__a )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__a )
@replace_return_docstrings(output_type=__a , config_class=_CONFIG_FOR_DOC )
def snake_case__ ( self : Dict , __a : Tensor , __a : Optional[bool] = None , __a : Optional[bool] = None ) -> BackboneOutput:
__UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCAmelCase = self.embedder(__a )
__UpperCAmelCase = self.encoder(__a , output_hidden_states=__a , return_dict=__a )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
__UpperCAmelCase = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__a , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__a , )
| 654 |
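# A minimal usage sketch for the model defined above, assuming the public
# `transformers` API; with the default ResNetConfig the pooled output is
# (batch, hidden_sizes[-1], 1, 1) for a 224x224 input.
import torch
from transformers import ResNetConfig, ResNetModel

config = ResNetConfig()
model = ResNetModel(config).eval()
pixel_values = torch.randn(1, config.num_channels, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values)
print(outputs.pooler_output.shape)  # torch.Size([1, 2048, 1, 1]) with defaults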
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
__UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = dct.pop(UpperCamelCase__ )
__UpperCAmelCase = val
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ )
__UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = 4_0_9_6
__UpperCAmelCase = 2_4
__UpperCAmelCase = 1_6
__UpperCAmelCase = 1_0_2_4
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = False
__UpperCAmelCase = '''relu'''
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
# load HuggingFace model
__UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ )
__UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ )
__UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model''']
__UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase = val
else:
__UpperCAmelCase = val
# load state dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
__UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 654 | 1 |
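# A minimal sketch of the slicing performed by `read_in_q_k_v` above: DeiT
# checkpoints store the attention projections as one fused
# (3 * hidden_size, hidden_size) matrix with the query, key and value weights
# stacked along dimension 0, which the conversion splits back apart.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q_weight = in_proj_weight[:hidden_size, :]
k_weight = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_weight = in_proj_weight[-hidden_size:, :]
assert q_weight.shape == k_weight.shape == v_weight.shape == (hidden_size, hidden_size)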
'''simple docstring'''
import re
def lowerCAmelCase ( UpperCamelCase__ : str ):
"""simple docstring"""
if len(re.findall('''[ATCG]''' , UpperCamelCase__ ) ) != len(UpperCamelCase__ ):
raise ValueError('''Invalid Strand''' )
return dna.translate(dna.maketrans('''ATCG''' , '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 |
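# The translation above is a single table lookup per base: `str.maketrans`
# builds the one-to-one mapping once, and `translate` applies it in one pass
# over the validated strand.
table = str.maketrans("ATCG", "TAGC")
assert "ATCGATCG".translate(table) == "TAGCTAGC"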
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]:
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy"""
def snake_case__ ( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return image
def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = '''bf16''' if fpaa else None
__UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
__a , subfolder='''unet''' , dtype=__a , revision=__a )
return model, params
def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
| 654 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
__lowerCAmelCase : str = {}
class A ( UpperCAmelCase ):
a_ = '''llama'''
a_ = ['''past_key_values''']
def __init__( self : List[Any] , __a : int=3_2_0_0_0 , __a : Optional[int]=4_0_9_6 , __a : Tuple=1_1_0_0_8 , __a : Optional[Any]=3_2 , __a : List[str]=3_2 , __a : Dict=None , __a : Any="silu" , __a : Tuple=2_0_4_8 , __a : Dict=0.0_2 , __a : Optional[Any]=1e-6 , __a : List[str]=True , __a : List[str]=0 , __a : Optional[Any]=1 , __a : List[str]=2 , __a : List[Any]=1 , __a : List[Any]=False , __a : Optional[int]=None , **__a : Dict , ) -> List[Any]:
__UpperCAmelCase = vocab_size
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = hidden_size
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = num_key_value_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = initializer_range
__UpperCAmelCase = rms_norm_eps
__UpperCAmelCase = pretraining_tp
__UpperCAmelCase = use_cache
__UpperCAmelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , )
def snake_case__ ( self : str ) -> int:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
f"""got {self.rope_scaling}""" )
__UpperCAmelCase = self.rope_scaling.get('''type''' , __a )
__UpperCAmelCase = self.rope_scaling.get('''factor''' , __a )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
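    # Illustrative usage sketch (an addition, not upstream code; the keyword name
    # is assumed from the attribute assignments in __init__ above):
    #
    #     A(rope_scaling={"type": "linear", "factor": 2.0})   # validates cleanly
    #     A(rope_scaling={"type": "static", "factor": 2.0})   # ValueError: bad type
    #     A(rope_scaling={"type": "linear", "factor": 0.5})   # ValueError: factor <= 1.0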
| 654 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
__lowerCAmelCase : List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
"""simple docstring"""
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern]
__UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ )
__UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCamelCase__ )
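# Worked example (added for clarity, not part of the original script): applying
# the "init" pattern with VERSION set to "4.31.0" rewrites the line
#     __version__ = "4.30.0.dev0"
# in src/transformers/__init__.py to
#     __version__ = "4.31.0"
# which is what update_version_in_file() does for every entry in REPLACE_FILES.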
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
__UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
# Find the start of the list.
__UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Any=False ):
"""simple docstring"""
__UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__UpperCAmelCase = default_version.base_version
elif patch:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = get_version()
__UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
__UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 654 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class A ( unittest.TestCase ):
a_ = StableDiffusionLDMaDPipeline
a_ = TEXT_TO_IMAGE_PARAMS
a_ = TEXT_TO_IMAGE_BATCH_PARAMS
a_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self : Dict ) -> Dict:
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__UpperCAmelCase = CLIPTextModel(__a )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self : str , __a : str , __a : Optional[Any]=0 ) -> str:
if str(__a ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(__a )
else:
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = StableDiffusionLDMaDPipeline(**__a )
__UpperCAmelCase = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = ldmad_pipe(**__a )
__UpperCAmelCase , __UpperCAmelCase = output.rgb, output.depth
__UpperCAmelCase = rgb[0, -3:, -3:, -1]
__UpperCAmelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
__UpperCAmelCase = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
__UpperCAmelCase = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
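    # Note (added): unlike a plain Stable Diffusion pipeline, LDM3D returns two
    # aligned modalities per prompt -- an RGB image of shape (H, W, 3) and a
    # depth map of shape (H, W) -- which is why every test here slices and
    # checks both output tensors.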
def snake_case__ ( self : List[str] ) -> Any:
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = StableDiffusionLDMaDPipeline(**__a )
__UpperCAmelCase = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = 3 * [inputs['''prompt''']]
# forward
__UpperCAmelCase = ldmad_pipe(**__a )
__UpperCAmelCase , __UpperCAmelCase = output.rgb, output.depth
__UpperCAmelCase = rgb_slice_a[0, -3:, -3:, -1]
__UpperCAmelCase = depth_slice_a[0, -3:, -1]
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = 3 * [inputs.pop('''prompt''' )]
__UpperCAmelCase = ldmad_pipe.tokenizer(
__a , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__a , return_tensors='''pt''' , )
__UpperCAmelCase = text_inputs['''input_ids'''].to(__a )
__UpperCAmelCase = ldmad_pipe.text_encoder(__a )[0]
__UpperCAmelCase = prompt_embeds
# forward
__UpperCAmelCase = ldmad_pipe(**__a )
__UpperCAmelCase , __UpperCAmelCase = output.rgb, output.depth
__UpperCAmelCase = rgb_slice_a[0, -3:, -3:, -1]
__UpperCAmelCase = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = PNDMScheduler(skip_prk_steps=__a )
__UpperCAmelCase = StableDiffusionLDMaDPipeline(**__a )
__UpperCAmelCase = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = '''french fries'''
__UpperCAmelCase = ldmad_pipe(**__a , negative_prompt=__a )
__UpperCAmelCase , __UpperCAmelCase = output.rgb, output.depth
__UpperCAmelCase = rgb[0, -3:, -3:, -1]
__UpperCAmelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 6_4, 6_4, 3)
assert depth.shape == (1, 6_4, 6_4)
__UpperCAmelCase = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
__UpperCAmelCase = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def snake_case__ ( self : Any ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Optional[int] , __a : Dict , __a : List[str]="cpu" , __a : Union[str, Any]=torch.floataa , __a : Optional[Any]=0 ) -> List[str]:
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__UpperCAmelCase = np.random.RandomState(__a ).standard_normal((1, 4, 6_4, 6_4) )
__UpperCAmelCase = torch.from_numpy(__a ).to(device=__a , dtype=__a )
__UpperCAmelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self : int ) -> Any:
__UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
__UpperCAmelCase = ldmad_pipe.to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_inputs(__a )
__UpperCAmelCase = ldmad_pipe(**__a )
__UpperCAmelCase , __UpperCAmelCase = output.rgb, output.depth
__UpperCAmelCase = rgb[0, -3:, -3:, -1].flatten()
__UpperCAmelCase = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2)
__UpperCAmelCase = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
__UpperCAmelCase = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class A ( unittest.TestCase ):
def snake_case__ ( self : List[str] ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Optional[Any] , __a : List[str] , __a : Any="cpu" , __a : List[str]=torch.floataa , __a : Dict=0 ) -> List[Any]:
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__UpperCAmelCase = np.random.RandomState(__a ).standard_normal((1, 4, 6_4, 6_4) )
__UpperCAmelCase = torch.from_numpy(__a ).to(device=__a , dtype=__a )
__UpperCAmelCase = {
'''prompt''': '''a photograph of an astronaut riding a horse''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 5_0,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self : Tuple ) -> int:
__UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_inputs(__a )
__UpperCAmelCase = ldmad_pipe(**__a )
__UpperCAmelCase , __UpperCAmelCase = output.rgb, output.depth
__UpperCAmelCase = 0.4_9_5_5_8_6
__UpperCAmelCase = 0.3_3_7_9_5_5_1_5
__UpperCAmelCase = 1_1_2.4_8_5_1_8
__UpperCAmelCase = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def snake_case__ ( self : List[Any] ) -> str:
__UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(__a )
ldmad_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_inputs(__a )
__UpperCAmelCase = ldmad_pipe(**__a )
__UpperCAmelCase , __UpperCAmelCase = output.rgb, output.depth
__UpperCAmelCase = 0.4_1_9_4_1_2_7
__UpperCAmelCase = 0.3_5_3_7_5_5_8_6
__UpperCAmelCase = 0.5_6_3_8_5_0_2
__UpperCAmelCase = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 5_1_2, 5_1_2, 3)
assert depth.shape == (1, 5_1_2, 5_1_2, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 654 |
'''simple docstring'''
def counting_sort(collection):
    """Sort a collection of integers with counting sort and return a new list."""
    # if the collection is empty, return an empty list
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to beginning, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    """Sort the characters of a string via counting_sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
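    # Added sanity check (illustrative): the coll_min offset makes negatives work too.
    assert counting_sort([-5, -10, 0, -3, 8, 5, -1, 10]) == [-10, -5, -3, -1, 0, 5, 8, 10]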
__lowerCAmelCase : str = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 654 | 1 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase = fname.split(os.path.sep )[-1]
return re.search(R'''^(.*)_\d+\.jpg$''' , UpperCamelCase__ ).groups()[0]
class A ( UpperCAmelCase ):
def __init__( self : Optional[Any] , __a : Optional[Any] , __a : List[Any]=None , __a : int=None ) -> List[Any]:
__UpperCAmelCase = file_names
__UpperCAmelCase = image_transform
__UpperCAmelCase = label_to_id
def __len__( self : Dict ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __a : int ) -> List[str]:
__UpperCAmelCase = self.file_names[idx]
__UpperCAmelCase = PIL.Image.open(__a )
__UpperCAmelCase = raw_image.convert('''RGB''' )
if self.image_transform is not None:
__UpperCAmelCase = self.image_transform(__a )
__UpperCAmelCase = extract_label(__a )
if self.label_to_id is not None:
__UpperCAmelCase = self.label_to_id[label]
return {"image": image, "label": label}
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
# Initialize accelerator
if args.with_tracking:
__UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir )
else:
__UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCAmelCase = config['''lr''']
__UpperCAmelCase = int(config['''num_epochs'''] )
__UpperCAmelCase = int(config['''seed'''] )
__UpperCAmelCase = int(config['''batch_size'''] )
__UpperCAmelCase = config['''image_size''']
if not isinstance(UpperCamelCase__ , (list, tuple) ):
__UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , '''isdigit''' ):
if args.checkpointing_steps == "epoch":
__UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
__UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
__UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
__UpperCAmelCase = os.path.split(UpperCamelCase__ )[-1].split('''.''' )[0]
accelerator.init_trackers(UpperCamelCase__ , UpperCamelCase__ )
# Grab all the image filenames
__UpperCAmelCase = [os.path.join(args.data_dir , UpperCamelCase__ ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )]
# Build the label correspondences
__UpperCAmelCase = [extract_label(UpperCamelCase__ ) for fname in file_names]
__UpperCAmelCase = list(set(UpperCamelCase__ ) )
id_to_label.sort()
__UpperCAmelCase = {lbl: i for i, lbl in enumerate(UpperCamelCase__ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase__ )
torch.manual_seed(UpperCamelCase__ )
torch.cuda.manual_seed_all(UpperCamelCase__ )
# Split our filenames between train and validation
__UpperCAmelCase = np.random.permutation(len(UpperCamelCase__ ) )
__UpperCAmelCase = int(0.8 * len(UpperCamelCase__ ) )
__UpperCAmelCase = random_perm[:cut]
__UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
__UpperCAmelCase = Compose([RandomResizedCrop(UpperCamelCase__ , scale=(0.5, 1.0) ), ToTensor()] )
__UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# For evaluation, we use a deterministic Resize
__UpperCAmelCase = Compose([Resize(UpperCamelCase__ ), ToTensor()] )
__UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase__ , label_to_id=UpperCamelCase__ )
# Instantiate dataloaders.
__UpperCAmelCase = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
__UpperCAmelCase = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCAmelCase = create_model('''resnet50d''' , pretrained=UpperCamelCase__ , num_classes=len(UpperCamelCase__ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
__UpperCAmelCase = False
for param in model.get_classifier().parameters():
__UpperCAmelCase = True
# We normalize the batches of images to be a bit faster.
__UpperCAmelCase = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device )
__UpperCAmelCase = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device )
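    # Note (added): indexing with [None, :, None, None] reshapes the per-channel
    # stats to (1, 3, 1, 1) so that (batch["image"] - mean) / std broadcasts over
    # a (batch, channels, height, width) image tensor.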
# Instantiate optimizer
__UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 2_5 )
# Instantiate learning rate scheduler
__UpperCAmelCase = OneCycleLR(optimizer=UpperCamelCase__ , max_lr=UpperCamelCase__ , epochs=UpperCamelCase__ , steps_per_epoch=len(UpperCamelCase__ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# We need to keep track of how many total steps we have iterated over
__UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
__UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
__UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
__UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
__UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
__UpperCAmelCase = os.path.splitext(UpperCamelCase__ )[0]
if "epoch" in training_difference:
__UpperCAmelCase = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1
__UpperCAmelCase = None
else:
__UpperCAmelCase = int(training_difference.replace('''step_''' , '''''' ) )
__UpperCAmelCase = resume_step // len(UpperCamelCase__ )
resume_step -= starting_epoch * len(UpperCamelCase__ )
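                # Worked example (added): with len(train_dataloader) == 100 and a
                # checkpoint named "step_250", starting_epoch = 250 // 100 = 2 and
                # resume_step = 250 - 2 * 100 = 50, i.e. skip the first 50 batches
                # of epoch 2.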
# Now we train the model
for epoch in range(UpperCamelCase__ , UpperCamelCase__ ):
model.train()
if args.with_tracking:
__UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
__UpperCAmelCase = accelerator.skip_first_batches(UpperCamelCase__ , UpperCamelCase__ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
__UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
__UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
__UpperCAmelCase = (batch['''image'''] - mean) / std
__UpperCAmelCase = model(UpperCamelCase__ )
__UpperCAmelCase = torch.nn.functional.cross_entropy(UpperCamelCase__ , batch['''label'''] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(UpperCamelCase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = f"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
__UpperCAmelCase = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
model.eval()
__UpperCAmelCase = 0
__UpperCAmelCase = 0
for step, batch in enumerate(UpperCamelCase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
__UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
__UpperCAmelCase = (batch['''image'''] - mean) / std
with torch.no_grad():
__UpperCAmelCase = model(UpperCamelCase__ )
__UpperCAmelCase = outputs.argmax(dim=-1 )
__UpperCAmelCase , __UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''label''']) )
__UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
__UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}: {1_0_0 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'''accuracy''': 1_0_0 * eval_metric,
'''train_loss''': total_loss.item() / len(UpperCamelCase__ ),
'''epoch''': epoch,
} , step=UpperCamelCase__ , )
if checkpointing_steps == "epoch":
__UpperCAmelCase = f"""epoch_{epoch}"""
if args.output_dir is not None:
__UpperCAmelCase = os.path.join(args.output_dir , UpperCamelCase__ )
accelerator.save_state(UpperCamelCase__ )
if args.with_tracking:
accelerator.end_training()
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument('''--data_dir''' , required=UpperCamelCase__ , help='''The data folder on disk.''' )
parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' )
parser.add_argument(
        '''--mixed_precision''' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose '''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '''
        '''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
parser.add_argument(
'''--checkpointing_steps''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , )
parser.add_argument(
'''--output_dir''' , type=UpperCamelCase__ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , )
parser.add_argument(
        '''--project_dir''' , type=UpperCamelCase__ , default='''logs''' , help='''Location where to store experiment tracking logs and relevant project information.''' , )
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = {'''lr''': 3E-2, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 6_4, '''image_size''': 2_2_4}
training_function(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 654 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current price for ``symbol`` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
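# Hardening sketch (added; not in the original script): Yahoo's markup changes
# frequently, and when the CSS class above disappears soup.find() returns None,
# so the chained .find("span") raises AttributeError. A defensive variant of
# the last two lines:
#
#     div = soup.find("div", class_=class_)
#     if div is None:
#         raise RuntimeError(f"price element not found for {symbol}")
#     return div.find("span").text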
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : Any = logging.get_logger(__name__)
__lowerCAmelCase : str = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class A ( UpperCAmelCase , UpperCAmelCase ):
a_ = '''focalnet'''
def __init__( self : Optional[int] , __a : Tuple=2_2_4 , __a : Dict=4 , __a : int=3 , __a : int=9_6 , __a : int=False , __a : Optional[int]=[1_9_2, 3_8_4, 7_6_8, 7_6_8] , __a : Tuple=[2, 2, 6, 2] , __a : List[Any]=[2, 2, 2, 2] , __a : Optional[Any]=[3, 3, 3, 3] , __a : Tuple="gelu" , __a : Optional[int]=4.0 , __a : Optional[Any]=0.0 , __a : Optional[int]=0.1 , __a : int=False , __a : List[Any]=1e-4 , __a : Tuple=False , __a : Union[str, Any]=False , __a : List[str]=False , __a : Tuple=0.0_2 , __a : str=1e-5 , __a : Optional[int]=3_2 , __a : Optional[int]=None , __a : List[Any]=None , **__a : Union[str, Any] , ) -> Optional[Any]:
super().__init__(**__a )
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = embed_dim
__UpperCAmelCase = use_conv_embed
__UpperCAmelCase = hidden_sizes
__UpperCAmelCase = depths
__UpperCAmelCase = focal_levels
__UpperCAmelCase = focal_windows
__UpperCAmelCase = hidden_act
__UpperCAmelCase = mlp_ratio
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = drop_path_rate
__UpperCAmelCase = use_layerscale
__UpperCAmelCase = layerscale_value
__UpperCAmelCase = use_post_layernorm
__UpperCAmelCase = use_post_layernorm_in_modulation
__UpperCAmelCase = normalize_modulator
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = encoder_stride
__UpperCAmelCase = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
__UpperCAmelCase , __UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=__a , out_indices=__a , stage_names=self.stage_names )
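        # Illustrative note (added): with the default depths [2, 2, 6, 2] the list
        # built above is ["stem", "stage1", "stage2", "stage3", "stage4"], and the
        # alignment helper maps e.g. out_features=["stage2", "stage4"] to
        # out_indices=[2, 4] for use as a backbone.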
| 654 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting time of each process under shortest-job-first."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed: a process whose arrival time has
    # passed and that still has remaining execution time is put into
    # ready_process, and the shortest process in ready_process,
    # target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turnaround time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
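# Worked example (added): with arrival_time = [0, 0, 0, 0] and burst_time =
# [2, 5, 3, 7] (the first test case below), the shortest job always runs next:
# P1(2) -> P3(3) -> P2(5) -> P4(7), giving waiting times [0, 5, 2, 10] and
# turnaround times [2, 10, 5, 17].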
if __name__ == "__main__":
print("[TEST CASE 01]")
__lowerCAmelCase : List[Any] = 4
__lowerCAmelCase : List[Any] = [2, 5, 3, 7]
__lowerCAmelCase : Tuple = [0, 0, 0, 0]
__lowerCAmelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowerCAmelCase : Dict = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 654 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__lowerCAmelCase : Optional[int] = 500_000
__lowerCAmelCase , __lowerCAmelCase : str = os.path.split(__file__)
__lowerCAmelCase : str = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    """Time a single dataset.map call."""
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    """Time a single dataset.filter call."""
    _ = dataset.filter(**kwargs)
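# Note (added): naming the two timed wrappers `map` and `filter` deliberately
# shadows the Python builtins, matching how benchmark_map_filter() below calls
# them. @get_duration (from the local utils module) is assumed to time the
# wrapped call and return the elapsed seconds.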
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
__UpperCAmelCase = generate_example_dataset(
os.path.join(UpperCamelCase__ , '''dataset.arrow''' ) , UpperCamelCase__ , num_examples=UpperCamelCase__ )
__UpperCAmelCase = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase__ )
        def tokenize(examples):
            return tokenizer(examples['''text'''] )
__UpperCAmelCase = map(UpperCamelCase__ )
__UpperCAmelCase = map(UpperCamelCase__ , batched=UpperCamelCase__ )
__UpperCAmelCase = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ )
with dataset.formatted_as(type='''numpy''' ):
__UpperCAmelCase = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ )
with dataset.formatted_as(type='''pandas''' ):
__UpperCAmelCase = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
__UpperCAmelCase = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
__UpperCAmelCase = map(UpperCamelCase__ , function=lambda UpperCamelCase__ : None , batched=UpperCamelCase__ )
__UpperCAmelCase = map(UpperCamelCase__ , function=UpperCamelCase__ , batched=UpperCamelCase__ )
__UpperCAmelCase = filter(UpperCamelCase__ )
        # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(UpperCamelCase__ , '''wb''' ) as f:
f.write(json.dumps(UpperCamelCase__ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 654 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
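# Note (added): every class in this module repeats the pattern above -- a
# metaclass-backed placeholder whose constructor and classmethods (from_config
# and from_pretrained in the upstream file) all call
# requires_backends(..., ["torch"]). Importing the package without torch thus
# succeeds, and an informative ImportError is only raised at the point of use,
# e.g. (hypothetical name): SomeTorchOnlyPipeline.from_pretrained("...").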
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : int , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
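# Hedged sketch (not from the original file): every placeholder class above
# follows the same pattern, deferring the "torch is missing" error from
# import time to first use. A minimal stand-in with hypothetical names:
class _DummyBackendMeta(type):
    def __getattr__(cls, name):
        # Attribute access on the class itself also raises, mirroring how
        # DummyObject-style metaclasses guard `from_pretrained` and friends.
        raise ImportError(f"{cls.__name__} requires the 'torch' backend to be installed.")

class _ExampleDummyModel(metaclass=_DummyBackendMeta):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the 'torch' backend to be installed.")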
| 654 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCAmelCase : List[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
__UpperCAmelCase = val
def lowerCAmelCase ( UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__UpperCAmelCase = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
__UpperCAmelCase = value
else:
__UpperCAmelCase = value
return new_state_dict
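# Hedged illustration (hypothetical keys, independent of the helpers above):
# the conversion boils down to popping each tensor under its old key and
# reinserting it under the converted name.
def _rename_demo(state_dict, mapping):
    for old, new in mapping:
        if old in state_dict:
            state_dict[new] = state_dict.pop(old)
    return state_dict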
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : int=False ):
"""simple docstring"""
__UpperCAmelCase = ''''''
if is_panoptic:
__UpperCAmelCase = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__UpperCAmelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
__UpperCAmelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase = in_proj_weight[:2_5_6, :]
__UpperCAmelCase = in_proj_bias[:2_5_6]
__UpperCAmelCase = in_proj_weight[2_5_6:5_1_2, :]
__UpperCAmelCase = in_proj_bias[2_5_6:5_1_2]
__UpperCAmelCase = in_proj_weight[-2_5_6:, :]
__UpperCAmelCase = in_proj_bias[-2_5_6:]
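# Hedged sketch of the slicing above: PyTorch's MultiheadAttention fuses the
# query, key and value projections into one (3*d, d) in_proj matrix, so the
# three projections are consecutive row blocks of size d (d = 256 here).
def _split_in_proj_demo(in_proj_weight, in_proj_bias, d=2_5_6):
    q = (in_proj_weight[:d, :], in_proj_bias[:d])
    k = (in_proj_weight[d : 2 * d, :], in_proj_bias[d : 2 * d])
    v = (in_proj_weight[-d:, :], in_proj_bias[-d:])
    return q, k, v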
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__UpperCAmelCase = '''resnet101'''
if "dc5" in model_name:
__UpperCAmelCase = True
__UpperCAmelCase = '''panoptic''' in model_name
if is_panoptic:
__UpperCAmelCase = 2_5_0
else:
__UpperCAmelCase = 9_1
__UpperCAmelCase = '''huggingface/label-files'''
__UpperCAmelCase = '''coco-detection-id2label.json'''
__UpperCAmelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__UpperCAmelCase = idalabel
__UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load image processor
__UpperCAmelCase = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__UpperCAmelCase = ConditionalDetrImageProcessor(format=UpperCamelCase__ )
# prepare image
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors='''pt''' )
__UpperCAmelCase = encoding['''pixel_values''']
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
__UpperCAmelCase = torch.hub.load('''DeppMeng/ConditionalDETR''' , UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval()
__UpperCAmelCase = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__UpperCAmelCase = '''conditional_detr.''' + src
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = rename_backbone_keys(UpperCamelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase__ , is_panoptic=UpperCamelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__UpperCAmelCase = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
__UpperCAmelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
__UpperCAmelCase = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
__UpperCAmelCase = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
__UpperCAmelCase = val
# finally, create HuggingFace model and load state dict
__UpperCAmelCase = ConditionalDetrForSegmentation(UpperCamelCase__ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
model.push_to_hub(repo_id=UpperCamelCase__ , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
__UpperCAmelCase = conditional_detr(UpperCamelCase__ )
__UpperCAmelCase = model(UpperCamelCase__ )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
image_processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 654 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
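# Hedged migration sketch (assumed checkpoint id; left commented because it
# downloads weights):
# from diffusers import StableDiffusionImg2ImgPipeline
# pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")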
| 654 | 1 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class A :
def __init__( self : List[Any] , __a : int , __a : Optional[Any]=sys.maxsize ) -> Union[str, Any]:
__UpperCAmelCase = '''bilinear'''
__UpperCAmelCase = max_size
__UpperCAmelCase = short_edge_length
def __call__( self : List[Any] , __a : Optional[Any] ) -> int:
__UpperCAmelCase = []
for img in imgs:
__UpperCAmelCase , __UpperCAmelCase = img.shape[:2]
# later: provide list and randomly choose index for resize
__UpperCAmelCase = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
__UpperCAmelCase = size * 1.0 / min(__a , __a )
if h < w:
__UpperCAmelCase , __UpperCAmelCase = size, scale * w
else:
__UpperCAmelCase , __UpperCAmelCase = scale * h, size
if max(__a , __a ) > self.max_size:
__UpperCAmelCase = self.max_size * 1.0 / max(__a , __a )
__UpperCAmelCase = newh * scale
__UpperCAmelCase = neww * scale
__UpperCAmelCase = int(neww + 0.5 )
__UpperCAmelCase = int(newh + 0.5 )
            if img.dtype == np.uint8:
__UpperCAmelCase = Image.fromarray(__a )
__UpperCAmelCase = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
__UpperCAmelCase = np.asarray(__a )
else:
                __UpperCAmelCase = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # hwc -> nchw
__UpperCAmelCase = nn.functional.interpolate(
__a , (newh, neww) , mode=self.interp_method , align_corners=__a ).squeeze(0 )
img_augs.append(__a )
return img_augs
class A :
def __init__( self : Union[str, Any] , __a : Any ) -> Optional[int]:
__UpperCAmelCase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
__UpperCAmelCase = cfg.INPUT.FORMAT
__UpperCAmelCase = cfg.SIZE_DIVISIBILITY
__UpperCAmelCase = cfg.PAD_VALUE
__UpperCAmelCase = cfg.INPUT.MAX_SIZE_TEST
__UpperCAmelCase = cfg.MODEL.DEVICE
__UpperCAmelCase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
__UpperCAmelCase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
__UpperCAmelCase = lambda __a : (x - self.pixel_mean) / self.pixel_std
def snake_case__ ( self : Optional[Any] , __a : Optional[Any] ) -> str:
__UpperCAmelCase = tuple(max(__a ) for s in zip(*[img.shape for img in images] ) )
__UpperCAmelCase = [im.shape[-2:] for im in images]
__UpperCAmelCase = [
nn.functional.pad(
__a , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(__a , __a )
]
return torch.stack(__a ), torch.tensor(__a )
def __call__( self : Tuple , __a : Optional[int] , __a : Union[str, Any]=False ) -> Union[str, Any]:
with torch.no_grad():
if not isinstance(__a , __a ):
__UpperCAmelCase = [images]
if single_image:
assert len(__a ) == 1
for i in range(len(__a ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(__a , images.pop(__a ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
__a , torch.as_tensor(img_tensorize(images.pop(__a ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
__UpperCAmelCase = torch.tensor([im.shape[:2] for im in images] )
__UpperCAmelCase = self.aug(__a )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
__UpperCAmelCase = [self.normalizer(__a ) for x in images]
# now pad them to do the following operations
__UpperCAmelCase , __UpperCAmelCase = self.pad(__a )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
__UpperCAmelCase = torch.true_divide(__a , __a )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ):
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowerCAmelCase ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple[int, int] ):
"""simple docstring"""
assert torch.isfinite(UpperCamelCase__ ).all(), "Box tensor contains infinite or NaN!"
__UpperCAmelCase , __UpperCAmelCase = box_size
tensor[:, 0].clamp_(min=0 , max=UpperCamelCase__ )
tensor[:, 1].clamp_(min=0 , max=UpperCamelCase__ )
tensor[:, 2].clamp_(min=0 , max=UpperCamelCase__ )
tensor[:, 3].clamp_(min=0 , max=UpperCamelCase__ )
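# Hedged self-contained variant of the two helpers above (both are anonymized
# to the same name here): rescale boxes by per-image y/x factors, then clamp
# the coordinates into the image bounds in place.
def _scale_and_clip_demo(boxes, scale_yx, box_size):
    boxes[:, 0::2] *= scale_yx[:, 1]  # x coordinates
    boxes[:, 1::2] *= scale_yx[:, 0]  # y coordinates
    h, w = box_size
    boxes[:, 0::2].clamp_(min=0, max=w)
    boxes[:, 1::2].clamp_(min=0, max=h)
    return boxes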
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
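# Hedged illustration of the lazy-import pattern above: importing the package
# stays cheap, and the heavy submodules load only on first attribute access.
# import transformers.models.llama as llama  # fast, nothing heavy imported yet
# llama.LlamaConfig                          # first access triggers the real import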
| 654 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__lowerCAmelCase : Any = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class A ( UpperCAmelCase ):
a_ = '''facebook/nllb-200-distilled-600M'''
a_ = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
a_ = '''translator'''
a_ = AutoTokenizer
a_ = AutoModelForSeqaSeqLM
a_ = LANGUAGE_CODES
a_ = ['''text''', '''text''', '''text''']
a_ = ['''text''']
def snake_case__ ( self : Any , __a : int , __a : List[str] , __a : Optional[Any] ) -> str:
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
__UpperCAmelCase = self.lang_to_code[src_lang]
__UpperCAmelCase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__a , return_tensors='''pt''' , src_lang=__a , tgt_lang=__a )
def snake_case__ ( self : Optional[Any] , __a : str ) -> Any:
return self.model.generate(**__a )
def snake_case__ ( self : Any , __a : Any ) -> Any:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__a )
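# Hedged usage sketch (the tool class is anonymized to `A` above; running it
# downloads the NLLB checkpoint, so the calls are left commented):
# translator = A()
# translator("Bonjour, le monde !", src_lang="French", tgt_lang="English")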
| 654 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ):
"""simple docstring"""
__UpperCAmelCase = {}
if train_file is not None:
__UpperCAmelCase = [train_file]
if eval_file is not None:
__UpperCAmelCase = [eval_file]
if test_file is not None:
__UpperCAmelCase = [test_file]
__UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ )
__UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
__UpperCAmelCase = features_name.pop(UpperCamelCase__ )
__UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
__UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )}
__UpperCAmelCase = tokenizer.model_input_names
__UpperCAmelCase = {}
if len(UpperCamelCase__ ) == 1:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , )
elif len(UpperCamelCase__ ) == 2:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
            UpperCamelCase__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
            UpperCamelCase__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
            UpperCamelCase__ , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class A :
a_ = field(metadata={'''help''': '''Which column contains the label'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} )
a_ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
a_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict:
__UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCAmelCase = TFTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(UpperCamelCase__ )
return results
if __name__ == "__main__":
main()
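# Example invocation (hypothetical CSV files whose label sits in column 0):
#   python run_tf_text_classification.py \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 --model_name_or_path distilbert-base-uncased \
#       --output_dir ./out --do_train --do_eval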
| 654 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A ( UpperCAmelCase ):
a_ = 42
a_ = 42
def __init__( self : str , __a : UNetaDModel , __a : ScoreSdeVeScheduler ) -> Optional[int]:
super().__init__()
self.register_modules(unet=__a , scheduler=__a )
@torch.no_grad()
def __call__( self : Any , __a : int = 1 , __a : int = 2_0_0_0 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : Optional[str] = "pil" , __a : bool = True , **__a : List[Any] , ) -> Union[ImagePipelineOutput, Tuple]:
__UpperCAmelCase = self.unet.config.sample_size
__UpperCAmelCase = (batch_size, 3, img_size, img_size)
__UpperCAmelCase = self.unet
__UpperCAmelCase = randn_tensor(__a , generator=__a ) * self.scheduler.init_noise_sigma
__UpperCAmelCase = sample.to(self.device )
self.scheduler.set_timesteps(__a )
self.scheduler.set_sigmas(__a )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCAmelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCAmelCase = self.unet(__a , __a ).sample
__UpperCAmelCase = self.scheduler.step_correct(__a , __a , generator=__a ).prev_sample
# prediction step
__UpperCAmelCase = model(__a , __a ).sample
__UpperCAmelCase = self.scheduler.step_pred(__a , __a , __a , generator=__a )
__UpperCAmelCase , __UpperCAmelCase = output.prev_sample, output.prev_sample_mean
__UpperCAmelCase = sample_mean.clamp(0 , 1 )
__UpperCAmelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase = self.numpy_to_pil(__a )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__a )
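# Hedged usage sketch (assumed checkpoint id; sampling is slow, so the calls
# are left commented):
# from diffusers import ScoreSdeVePipeline
# pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
# image = pipe(num_inference_steps=2_0_0_0).images[0]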
| 654 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__( self : List[Any] , __a : Any , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = 1_3
__UpperCAmelCase = 7
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = 9_9
__UpperCAmelCase = 3_2
__UpperCAmelCase = 2
__UpperCAmelCase = 4
__UpperCAmelCase = 3_7
__UpperCAmelCase = '''gelu'''
__UpperCAmelCase = 0.1
__UpperCAmelCase = 0.1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = 1_6
__UpperCAmelCase = 2
__UpperCAmelCase = 0.0_2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = None
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
__UpperCAmelCase = TFDistilBertModel(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
__UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForTokenClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = TFDistilBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def snake_case__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __a )
__UpperCAmelCase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
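# Typical way to run this suite (hypothetical path, following the usual
# transformers test layout):
#   python -m pytest tests/models/distilbert/test_modeling_tf_distilbert.py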
| 654 | 1 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A ( UpperCAmelCase ):
a_ = 42
a_ = 42
class A ( UpperCAmelCase , UpperCAmelCase ):
a_ = 1
@register_to_config
def __init__( self : List[str] , __a : int = 2_0_0_0 , __a : float = 0.1_5 , __a : float = 0.0_1 , __a : float = 1_3_4_8.0 , __a : float = 1e-5 , __a : int = 1 , ) -> Tuple:
# standard deviation of the initial noise distribution
__UpperCAmelCase = sigma_max
# setable values
__UpperCAmelCase = None
self.set_sigmas(__a , __a , __a , __a )
def snake_case__ ( self : List[Any] , __a : torch.FloatTensor , __a : Optional[int] = None ) -> torch.FloatTensor:
return sample
def snake_case__ ( self : Union[str, Any] , __a : int , __a : float = None , __a : Union[str, torch.device] = None ) -> Tuple:
__UpperCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
__UpperCAmelCase = torch.linspace(1 , __a , __a , device=__a )
def snake_case__ ( self : Optional[Any] , __a : int , __a : float = None , __a : float = None , __a : float = None ) -> int:
__UpperCAmelCase = sigma_min if sigma_min is not None else self.config.sigma_min
__UpperCAmelCase = sigma_max if sigma_max is not None else self.config.sigma_max
__UpperCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__a , __a )
__UpperCAmelCase = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
__UpperCAmelCase = torch.exp(torch.linspace(math.log(__a ) , math.log(__a ) , __a ) )
__UpperCAmelCase = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def snake_case__ ( self : Any , __a : Union[str, Any] , __a : Dict ) -> Any:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def snake_case__ ( self : int , __a : torch.FloatTensor , __a : int , __a : torch.FloatTensor , __a : Optional[torch.Generator] = None , __a : bool = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
__UpperCAmelCase = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
__UpperCAmelCase = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
__UpperCAmelCase = timesteps.to(self.discrete_sigmas.device )
__UpperCAmelCase = self.discrete_sigmas[timesteps].to(sample.device )
__UpperCAmelCase = self.get_adjacent_sigma(__a , __a ).to(sample.device )
__UpperCAmelCase = torch.zeros_like(__a )
__UpperCAmelCase = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
__UpperCAmelCase = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
__UpperCAmelCase = diffusion.unsqueeze(-1 )
__UpperCAmelCase = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
__UpperCAmelCase = randn_tensor(
sample.shape , layout=sample.layout , generator=__a , device=sample.device , dtype=sample.dtype )
__UpperCAmelCase = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
__UpperCAmelCase = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__a , prev_sample_mean=__a )
def snake_case__ ( self : Union[str, Any] , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : Optional[torch.Generator] = None , __a : bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggests replacing norm(z) with sqrt(d)", where d is the dim. of z
# sample noise for correction
__UpperCAmelCase = randn_tensor(sample.shape , layout=sample.layout , generator=__a ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
__UpperCAmelCase = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
__UpperCAmelCase = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
__UpperCAmelCase = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
__UpperCAmelCase = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
__UpperCAmelCase = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
__UpperCAmelCase = step_size.unsqueeze(-1 )
__UpperCAmelCase = sample + step_size * model_output
__UpperCAmelCase = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__a )
def snake_case__ ( self : List[Any] , __a : torch.FloatTensor , __a : torch.FloatTensor , __a : torch.FloatTensor , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__UpperCAmelCase = timesteps.to(original_samples.device )
__UpperCAmelCase = self.discrete_sigmas.to(original_samples.device )[timesteps]
__UpperCAmelCase = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__a ) * sigmas[:, None, None, None]
)
__UpperCAmelCase = noise + original_samples
return noisy_samples
def __len__( self : Optional[int] ) -> List[Any]:
return self.config.num_train_timesteps
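# Hedged usage sketch (added; not part of the original file). Assuming this
# scheduler mirrors diffusers' ScoreSdeVeScheduler API, a sampling loop first
# sets timesteps and sigmas, then alternates correction and prediction steps;
# `model` and `sample` below are hypothetical placeholders.
#
#     scheduler.set_timesteps(num_inference_steps=2_0_0_0)
#     scheduler.set_sigmas(num_inference_steps=2_0_0_0)
#     for t in scheduler.timesteps:
#         score = model(sample, t).sample
#         sample = scheduler.step_correct(score, sample).prev_sample
#         sample = scheduler.step_pred(score, t, sample).prev_sample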
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
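# Hedged usage note (added): with the `_LazyModule` indirection above, heavy
# submodules are only imported when an attribute is first accessed, so e.g.
#
#     from transformers.models.audio_spectrogram_transformer import ASTConfig
#
# stays cheap until `ASTModel` (and therefore torch) is actually touched.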
| 654 | 1 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCAmelCase : Dict = logging.get_logger(__name__)
class A ( UpperCAmelCase ):
a_ = ['''input_features''']
def __init__( self : List[str] , __a : Union[str, Any]=8_0 , __a : List[Any]=1_6_0_0_0 , __a : Any=1_6_0 , __a : Any=3_0 , __a : Union[str, Any]=4_0_0 , __a : Tuple=0.0 , __a : Tuple=False , **__a : Any , ) -> Union[str, Any]:
super().__init__(
feature_size=__a , sampling_rate=__a , padding_value=__a , return_attention_mask=__a , **__a , )
__UpperCAmelCase = n_fft
__UpperCAmelCase = hop_length
__UpperCAmelCase = chunk_length
__UpperCAmelCase = chunk_length * sampling_rate
__UpperCAmelCase = self.n_samples // hop_length
__UpperCAmelCase = sampling_rate
__UpperCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__a , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=__a , norm='''slaney''' , mel_scale='''slaney''' , )
def snake_case__ ( self : Optional[int] , __a : np.array ) -> np.ndarray:
__UpperCAmelCase = spectrogram(
__a , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
__UpperCAmelCase = log_spec[:, :-1]
__UpperCAmelCase = np.maximum(__a , log_spec.max() - 8.0 )
__UpperCAmelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def snake_case__ ( __a : List[np.ndarray] , __a : List[np.ndarray] , __a : float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
__UpperCAmelCase = np.array(__a , np.intaa )
__UpperCAmelCase = []
for vector, length in zip(__a , attention_mask.sum(-1 ) ):
__UpperCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__UpperCAmelCase = padding_value
normed_input_values.append(__a )
else:
__UpperCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : Tuple , __a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a : bool = True , __a : Optional[int] = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[bool] = None , __a : Optional[str] = "max_length" , __a : Optional[int] = None , __a : Optional[int] = None , __a : Optional[bool] = None , **__a : Optional[Any] , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
__UpperCAmelCase = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
__UpperCAmelCase = is_batched_numpy or (
isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__UpperCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__a , np.ndarray ):
__UpperCAmelCase = np.asarray(__a , dtype=np.floataa )
elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__UpperCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__UpperCAmelCase = [np.asarray([raw_speech] ).T]
__UpperCAmelCase = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
__UpperCAmelCase = self.pad(
__a , padding=__a , max_length=max_length if max_length else self.n_samples , truncation=__a , pad_to_multiple_of=__a , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__UpperCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
__UpperCAmelCase = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
__UpperCAmelCase = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
__UpperCAmelCase = [self._np_extract_fbank_features(__a ) for waveform in input_features[0]]
if isinstance(input_features[0] , __a ):
__UpperCAmelCase = [np.asarray(__a , dtype=np.floataa ) for feature in input_features]
else:
__UpperCAmelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
__UpperCAmelCase = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
__UpperCAmelCase = padded_inputs.convert_to_tensors(__a )
return padded_inputs
def snake_case__ ( self : List[str] ) -> Dict[str, Any]:
__UpperCAmelCase = copy.deepcopy(self.__dict__ )
__UpperCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
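# Hedged usage sketch (added; in the un-obfuscated original this is the
# Whisper feature extractor). Raw mono audio at 16 kHz is padded or truncated
# to `chunk_length` seconds and converted to log-mel features; `extractor` is
# a hypothetical instance of the class above.
#
#     inputs = extractor(raw_audio, sampling_rate=1_6_0_0_0, return_tensors='''pt''')
#     inputs['''input_features''']  # shape (batch, n_mels, n_frames)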
| 654 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class A ( UpperCAmelCase ):
a_ = '''bert-generation'''
def __init__( self : str , __a : str=5_0_3_5_8 , __a : int=1_0_2_4 , __a : Optional[Any]=2_4 , __a : Any=1_6 , __a : int=4_0_9_6 , __a : Any="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=5_1_2 , __a : int=0.0_2 , __a : str=1e-12 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : str="absolute" , __a : Optional[Any]=True , **__a : Tuple , ) -> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
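# Hedged usage sketch (added): a config object like the one above is built
# from keyword overrides and handed to the matching model class; the class
# name below follows the un-obfuscated original (BertGenerationConfig).
#
#     config = BertGenerationConfig(hidden_size=5_1_2, num_hidden_layers=6)
#     config.vocab_size  # 50358 by default, per the signature above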
| 654 | 1 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A ( nn.Module ):
def __init__( self : Tuple , __a : nn.Module , __a : int ) -> Optional[int]:
super().__init__()
__UpperCAmelCase = module
__UpperCAmelCase = nn.Sequential(
nn.Linear(module.in_features , __a , bias=__a ) , nn.Linear(__a , module.out_features , bias=__a ) , )
__UpperCAmelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__a )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def snake_case__ ( self : List[Any] , __a : List[Any] , *__a : Tuple , **__a : Dict ) -> str:
return self.module(__a , *__a , **__a ) + self.adapter(__a )
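# Hedged note (added): the adapter above is a two-linear bottleneck
# (in_features -> rank -> out_features) whose output is added residually to
# the frozen module's output, i.e. y = module(x) + adapter(x), which is the
# LoRA pattern the training test below exercises on the q/k/v projections.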
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
a_ = '''bigscience/bloom-1b7'''
# Constant values
a_ = 2.109_659_552_692_574
a_ = '''Hello my name is'''
a_ = set()
EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''' )
EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' )
EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' )
a_ = 1_0
def snake_case__ ( self : str ) -> str:
# Models and tokenizer
__UpperCAmelCase = AutoTokenizer.from_pretrained(self.model_name )
class A ( UpperCAmelCase ):
def snake_case__ ( self : Dict ) -> List[str]:
super().setUp()
# Models and tokenizer
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='''auto''' )
def snake_case__ ( self : str ) -> Tuple:
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Optional[int] ) -> List[str]:
__UpperCAmelCase = self.model_abit.config
self.assertTrue(hasattr(__a , '''quantization_config''' ) )
__UpperCAmelCase = config.to_dict()
__UpperCAmelCase = config.to_diff_dict()
__UpperCAmelCase = config.to_json_string()
def snake_case__ ( self : Tuple ) -> Any:
from bitsandbytes.nn import Paramsabit
__UpperCAmelCase = self.model_fpaa.get_memory_footprint()
__UpperCAmelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__UpperCAmelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def snake_case__ ( self : Optional[Any] ) -> Optional[Any]:
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__a , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def snake_case__ ( self : List[Any] ) -> Dict:
__UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__UpperCAmelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def snake_case__ ( self : Union[str, Any] ) -> str:
__UpperCAmelCase = BitsAndBytesConfig()
__UpperCAmelCase = True
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , device_map='''auto''' )
__UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__UpperCAmelCase = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
def snake_case__ ( self : Optional[Any] ) -> str:
with self.assertRaises(__a ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__a )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
__UpperCAmelCase = BitsAndBytesConfig()
with self.assertRaises(__a ):
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__a , load_in_abit=__a , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
with self.assertRaises(__a ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(__a ):
# Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(__a ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(__a ):
# Tries with a cast to float
self.model_abit.float()
with self.assertRaises(__a ):
# Tries with a cast to half
self.model_abit.half()
# Test if we did not break anything
__UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__UpperCAmelCase = self.model_fpaa.to(torch.floataa )
__UpperCAmelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
__UpperCAmelCase = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
__UpperCAmelCase = self.model_fpaa.half()
# Check this does not throw an error
__UpperCAmelCase = self.model_fpaa.float()
def snake_case__ ( self : str ) -> Dict:
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__a , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A ( unittest.TestCase ):
@classmethod
def snake_case__ ( cls : int ) -> Any:
__UpperCAmelCase = '''t5-small'''
__UpperCAmelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
__UpperCAmelCase = AutoTokenizer.from_pretrained(cls.model_name )
__UpperCAmelCase = '''Translate in German: Hello, my dog is cute'''
def snake_case__ ( self : Dict ) -> str:
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : List[Any] ) -> Dict:
from transformers import TaForConditionalGeneration
__UpperCAmelCase = TaForConditionalGeneration._keep_in_fpaa_modules
__UpperCAmelCase = None
# test with `t5-small`
__UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='''auto''' )
__UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__UpperCAmelCase = model.generate(**__a )
# test with `flan-t5-small`
__UpperCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='''auto''' )
__UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__UpperCAmelCase = model.generate(**__a )
__UpperCAmelCase = modules
def snake_case__ ( self : str ) -> List[Any]:
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__UpperCAmelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__a , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__UpperCAmelCase = model.generate(**__a )
# test with `flan-t5-small`
__UpperCAmelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__a , device_map='''auto''' )
__UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__UpperCAmelCase = model.generate(**__a )
class A ( UpperCAmelCase ):
def snake_case__ ( self : str ) -> List[Any]:
super().setUp()
# model_name
__UpperCAmelCase = '''bigscience/bloom-560m'''
__UpperCAmelCase = '''t5-small'''
# Different types of model
__UpperCAmelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__a , device_map='''auto''' )
# Sequence classification model
__UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__a , device_map='''auto''' )
# CausalLM model
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a , device_map='''auto''' )
# Seq2seq model
__UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__a , device_map='''auto''' )
def snake_case__ ( self : Any ) -> int:
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : str ) -> Optional[int]:
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A ( UpperCAmelCase ):
def snake_case__ ( self : str ) -> Optional[Any]:
super().setUp()
def snake_case__ ( self : Optional[int] ) -> Union[str, Any]:
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : int ) -> List[str]:
__UpperCAmelCase = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__UpperCAmelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A ( UpperCAmelCase ):
def snake_case__ ( self : int ) -> int:
super().setUp()
def snake_case__ ( self : Dict ) -> Any:
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=__a , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__UpperCAmelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
__UpperCAmelCase = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__a ) , self.EXPECTED_OUTPUTS )
class A ( UpperCAmelCase ):
def snake_case__ ( self : Union[str, Any] ) -> int:
__UpperCAmelCase = '''facebook/opt-350m'''
super().setUp()
def snake_case__ ( self : Optional[Any] ) -> List[str]:
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__a )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__UpperCAmelCase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__UpperCAmelCase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(__a ) ):
__UpperCAmelCase = LoRALayer(module.q_proj , rank=1_6 )
__UpperCAmelCase = LoRALayer(module.k_proj , rank=1_6 )
__UpperCAmelCase = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
__UpperCAmelCase = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__UpperCAmelCase = model.forward(**__a )
out.logits.norm().backward()
for module in model.modules():
if isinstance(__a , __a ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(__a , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A ( UpperCAmelCase ):
a_ = '''gpt2-xl'''
a_ = 3.3_191_854_854_152_187
| 654 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__lowerCAmelCase : str = 299_792_458
# Symbols
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = symbols("ct x y z")
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return 1 / sqrt(1 - beta(UpperCamelCase__ ) ** 2 )
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return np.array(
[
[gamma(UpperCamelCase__ ), -gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), 0, 0],
[-gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), gamma(UpperCamelCase__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : np.ndarray | None = None ):
"""simple docstring"""
# Ensure event is not empty
if event is None:
__UpperCAmelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(UpperCamelCase__ ) @ event
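# Worked check (added): at velocity = 29_979_245 m/s (about c / 10),
# beta = v / c ~= 0.1 and gamma = 1 / sqrt(1 - 0.01) ~= 1.00504, so the boost
# below mixes roughly 10% of x into ct' (and vice versa) while the identity
# block of the 4x4 matrix leaves y and z untouched.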
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__lowerCAmelCase : Dict = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__lowerCAmelCase : Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
__lowerCAmelCase : Optional[int] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 654 | 1 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 7 , UpperCamelCase__ : int = 1_0_0_0_0_0_0 ):
"""simple docstring"""
__UpperCAmelCase = 0
__UpperCAmelCase = 1
for current_denominator in range(1 , limit + 1 ):
__UpperCAmelCase = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__UpperCAmelCase = current_numerator
__UpperCAmelCase = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
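# Worked check (added; `solution` is the un-obfuscated name used above):
# with limit = 8 the candidates just below 3/7 are 0/1, 0/2, 1/3, 1/4, 2/5,
# 2/6, 2/7, 3/8, and 2/5 = 0.4 is the largest, so solution(3, 7, 8) == 2.
# The `current_denominator % denominator == 0` branch steps the numerator
# back by one exactly when floor(d * 3 / 7) would land on 3/7 itself.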
| 654 |
'''simple docstring'''
import heapq
import sys
import numpy as np
__lowerCAmelCase : Any = tuple[int, int]
class A :
def __init__( self : Optional[int] ) -> int:
__UpperCAmelCase = []
__UpperCAmelCase = set()
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case__ ( self : Dict ) -> Optional[int]:
return len(self.elements ) == 0
def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__a )
else:
# update
# print("update", item)
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def snake_case__ ( self : int , __a : Any ) -> int:
if item in self.set:
self.set.remove(__a )
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case__ ( self : List[str] ) -> Dict:
return self.elements[0][1]
def snake_case__ ( self : Any ) -> List[str]:
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
self.set.remove(__a )
return (priority, item)
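# Hedged usage sketch (added): per the un-obfuscated original, the methods
# above are minkey/empty/put/remove_element/top_show/get; `put` either pushes
# a new (priority, item) pair or rebuilds the heap to update an existing
# item's priority.
#
#     queue = PriorityQueue()
#     queue.put((0, 0), 5)
#     queue.put((0, 0), 3)   # updates the priority of (0, 0) to 3
#     queue.get()            # -> (3, (0, 0))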
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# euclidean distance
__UpperCAmelCase = np.array(UpperCamelCase__ )
__UpperCAmelCase = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : int , UpperCamelCase__ : TPos , UpperCamelCase__ : dict[TPos, float] ):
"""simple docstring"""
__UpperCAmelCase = g_function[start] + Wa * heuristics[i](UpperCamelCase__ , UpperCamelCase__ )
return ans
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = np.chararray((n, n) )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
__UpperCAmelCase = '''*'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (j, (n - 1) - i) in blocks:
__UpperCAmelCase = '''#'''
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[goal]
while x != start:
((__UpperCAmelCase) , (__UpperCAmelCase)) = x
# print(x)
__UpperCAmelCase = '''-'''
__UpperCAmelCase = back_pointer[x]
__UpperCAmelCase = '''-'''
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__UpperCAmelCase = back_pointer[goal]
while x != start:
print(UpperCamelCase__ , end=''' ''' )
__UpperCAmelCase = back_pointer[x]
print(UpperCamelCase__ )
sys.exit()
def lowerCAmelCase ( UpperCamelCase__ : TPos ):
"""simple docstring"""
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , ):
"""simple docstring"""
for itera in range(UpperCamelCase__ ):
open_list[itera].remove_element(UpperCamelCase__ )
# print("s", s)
# print("j", j)
((__UpperCAmelCase) , (__UpperCAmelCase)) = s
__UpperCAmelCase = (x - 1, y)
__UpperCAmelCase = (x + 1, y)
__UpperCAmelCase = (x, y + 1)
__UpperCAmelCase = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(UpperCamelCase__ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(UpperCamelCase__ )
__UpperCAmelCase = -1
__UpperCAmelCase = float('''inf''' )
if valid(UpperCamelCase__ ) and g_function[neighbours] > g_function[s] + 1:
__UpperCAmelCase = g_function[s] + 1
__UpperCAmelCase = s
if neighbours not in close_list_anchor:
open_list[0].put(UpperCamelCase__ , key(UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ) )
if neighbours not in close_list_inad:
for var in range(1 , UpperCamelCase__ ):
if key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) <= Wa * key(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ ):
open_list[j].put(
UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
__lowerCAmelCase : Optional[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__lowerCAmelCase : List[Any] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__lowerCAmelCase : Dict = make_common_ground()
__lowerCAmelCase : int = blocks_blk
# hyper parameters
__lowerCAmelCase : Dict = 1
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : Union[str, Any] = 20
__lowerCAmelCase : Any = 3 # one consistent and two other inconsistent
# start and end destination
__lowerCAmelCase : Optional[Any] = (0, 0)
__lowerCAmelCase : Any = (n - 1, n - 1)
__lowerCAmelCase : Optional[int] = 1
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = {start: 0, goal: float('''inf''' )}
__UpperCAmelCase = {start: -1, goal: -1}
__UpperCAmelCase = []
__UpperCAmelCase = set()
for i in range(UpperCamelCase__ ):
open_list.append(PriorityQueue() )
open_list[i].put(UpperCamelCase__ , key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
__UpperCAmelCase = []
__UpperCAmelCase = []
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , UpperCamelCase__ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase , __UpperCAmelCase = open_list[i].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_inad.append(UpperCamelCase__ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
__UpperCAmelCase = open_list[0].top_show()
visited.add(UpperCamelCase__ )
expand_state(
UpperCamelCase__ , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , )
close_list_anchor.append(UpperCamelCase__ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(UpperCamelCase__ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A ( UpperCAmelCase ):
a_ = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
a_ = '''CIDAS/clipseg-rd64-refined'''
a_ = '''image_segmenter'''
a_ = CLIPSegForImageSegmentation
a_ = ['''image''', '''text''']
a_ = ['''image''']
def __init__( self : Tuple , *__a : str , **__a : List[Any] ) -> Tuple:
requires_backends(self , ['''vision'''] )
super().__init__(*__a , **__a )
def snake_case__ ( self : Any , __a : "Image" , __a : str ) -> int:
return self.pre_processor(text=[label] , images=[image] , padding=__a , return_tensors='''pt''' )
def snake_case__ ( self : Any , __a : Dict ) -> List[Any]:
with torch.no_grad():
__UpperCAmelCase = self.model(**__a ).logits
return logits
def snake_case__ ( self : List[Any] , __a : Optional[int] ) -> List[Any]:
__UpperCAmelCase = outputs.cpu().detach().numpy()
__UpperCAmelCase = 0
__UpperCAmelCase = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta ) )
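# Hedged usage sketch (added; in the un-obfuscated original this tool is
# registered as `image_segmenter` on top of CLIPSeg). A PIL image and a text
# label go in, and a binary mask comes back as a PIL image.
#
#     tool = ImageSegmentationTool()          # assumed un-obfuscated name
#     mask = tool(image=pil_image, label='''a cat''')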
| 654 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ )
return [m.group(0 ) for m in matches]
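# Worked example (added): the regex above splits on lower->upper boundaries
# and on upper-case runs followed by a lowercase letter, so "TFBertModel"
# yields ["TF", "Bert", "Model"], which is how the prefix lookup below peels
# the framework marker and trailing words off a class name.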
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
config.replace('''Config''' , '''''' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(UpperCamelCase__ ):
__UpperCAmelCase = None
if _re_tf_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = tf_models
__UpperCAmelCase = _re_tf_models.match(UpperCamelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = flax_models
__UpperCAmelCase = _re_flax_models.match(UpperCamelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCamelCase__ ) is not None:
__UpperCAmelCase = pt_models
__UpperCAmelCase = _re_pt_models.match(UpperCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCamelCase__ ) > 0:
if attr_name in model_prefix_to_model_type:
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
__UpperCAmelCase = ''''''.join(camel_case_split(UpperCamelCase__ )[:-1] )
__UpperCAmelCase = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
__UpperCAmelCase = list(UpperCamelCase__ )
all_models.sort()
__UpperCAmelCase = {'''model_type''': all_models}
__UpperCAmelCase = [pt_models[t] for t in all_models]
__UpperCAmelCase = [tf_models[t] for t in all_models]
__UpperCAmelCase = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
__UpperCAmelCase = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoProcessor'''
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
__UpperCAmelCase = '''AutoTokenizer'''
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
__UpperCAmelCase = '''AutoFeatureExtractor'''
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
__UpperCAmelCase = '''AutoTokenizer'''
__UpperCAmelCase = [processors[t] for t in all_models]
return pd.DataFrame(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
__UpperCAmelCase = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
__UpperCAmelCase = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# The type of pipeline may not exist in this framework
if not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
continue
# First extract all model_names
__UpperCAmelCase = []
for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
model_names.append(UpperCamelCase__ )
else:
model_names.extend(list(UpperCamelCase__ ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = get_frameworks_table()
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
__UpperCAmelCase = hf_hub_download(
'''huggingface/transformers-metadata''' , '''pipeline_tags.json''' , repo_type='''dataset''' , token=UpperCamelCase__ )
__UpperCAmelCase = Dataset.from_json(UpperCamelCase__ )
__UpperCAmelCase = {
tags_dataset[i]['''model_class''']: (tags_dataset[i]['''pipeline_tag'''], tags_dataset[i]['''auto_class'''])
for i in range(len(UpperCamelCase__ ) )
}
__UpperCAmelCase = update_pipeline_and_auto_class_table(UpperCamelCase__ )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
__UpperCAmelCase = sorted(table.keys() )
__UpperCAmelCase = pd.DataFrame(
{
'''model_class''': model_classes,
'''pipeline_tag''': [table[m][0] for m in model_classes],
'''auto_class''': [table[m][1] for m in model_classes],
} )
__UpperCAmelCase = Dataset.from_pandas(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , '''frameworks.json''' ) )
tags_dataset.to_json(os.path.join(UpperCamelCase__ , '''pipeline_tags.json''' ) )
if commit_sha is not None:
__UpperCAmelCase = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
__UpperCAmelCase = '''Update'''
upload_folder(
repo_id='''huggingface/transformers-metadata''' , folder_path=UpperCamelCase__ , repo_type='''dataset''' , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
__UpperCAmelCase = transformers_module.pipelines.SUPPORTED_TASKS
__UpperCAmelCase = []
for key in pipeline_tasks:
if key not in in_table:
__UpperCAmelCase = pipeline_tasks[key]['''pt''']
if isinstance(UpperCamelCase__ , (list, tuple) ):
__UpperCAmelCase = model[0]
__UpperCAmelCase = model.__name__
if model not in in_table.values():
missing.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
__UpperCAmelCase = ''', '''.join(UpperCamelCase__ )
raise ValueError(
'''The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '''
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 654 | 1 |
'''simple docstring'''
from ....utils import logging
__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class A ( UpperCAmelCase ):
def __init__( self : Optional[int] , __a : int , __a : int=None , __a : Dict=2_0_4_8 ) -> Any:
__UpperCAmelCase = config.__dict__
__UpperCAmelCase = modal_hidden_size
if num_labels:
__UpperCAmelCase = num_labels
| 654 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__lowerCAmelCase : Optional[int] = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class A ( unittest.TestCase ):
def snake_case__ ( self : Any , __a : str , __a : bool , __a : str = None , __a : list = None ) -> Tuple:
__UpperCAmelCase = None
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
__UpperCAmelCase = os.path.abspath('''examples''' )
for item in os.listdir(__a ):
if item not in EXCLUDE_EXAMPLES:
__UpperCAmelCase = os.path.join(__a , __a )
if os.path.isfile(__a ) and ".py" in item_path:
with self.subTest(
tested_script=__a , feature_script=__a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
__UpperCAmelCase = compare_against_test(
os.path.join(__a , __a ) , __a , __a , __a )
__UpperCAmelCase = '''\n'''.join(__a )
if special_strings is not None:
for string in special_strings:
__UpperCAmelCase = diff.replace(__a , '''''' )
self.assertEqual(__a , '''''' )
def snake_case__ ( self : Optional[Any] ) -> str:
self.one_complete_example('''complete_nlp_example.py''' , __a )
self.one_complete_example('''complete_nlp_example.py''' , __a )
def snake_case__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
__UpperCAmelCase = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
self.one_complete_example('''complete_cv_example.py''' , __a , __a , __a )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class A ( UpperCAmelCase ):
a_ = False
@classmethod
def snake_case__ ( cls : Tuple ) -> str:
super().setUpClass()
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__UpperCAmelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def snake_case__ ( cls : Dict ) -> int:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
def snake_case__ ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase = f"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
else:
__UpperCAmelCase = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
else:
self.assertIn('''epoch 0:''' , __a )
self.assertIn('''epoch 1:''' , __a )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
__UpperCAmelCase = run_command(self._launch_args + testargs , return_stdout=__a )
__UpperCAmelCase = re.findall('''({.+})''' , __a )
__UpperCAmelCase = [r for r in results if '''accuracy''' in r][-1]
__UpperCAmelCase = ast.literal_eval(__a )
self.assertGreaterEqual(results['''accuracy'''] , 0.7_5 )
def snake_case__ ( self : Dict ) -> int:
__UpperCAmelCase = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def snake_case__ ( self : Optional[Any] ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
__UpperCAmelCase = f"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(__a , '''tracking''' ) ) )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
__UpperCAmelCase = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
__UpperCAmelCase = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 654 | 1 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A ( UpperCAmelCase ):
def __init__( self : int , __a : Callable , __a : Optional[Features] = None , __a : str = None , __a : bool = False , __a : bool = False , __a : Optional[dict] = None , __a : Optional[int] = None , **__a : Optional[Any] , ) -> str:
super().__init__(
features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , )
__UpperCAmelCase = Generator(
cache_dir=__a , features=__a , generator=__a , gen_kwargs=__a , **__a , )
def snake_case__ ( self : Optional[int] ) -> Optional[int]:
# Build iterable dataset
if self.streaming:
__UpperCAmelCase = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
self.builder.download_and_prepare(
download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , )
__UpperCAmelCase = self.builder.as_dataset(
split='''train''' , verification_mode=__a , in_memory=self.keep_in_memory )
return dataset
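# Hedged usage sketch (added): in the un-obfuscated original this reader
# backs `Dataset.from_generator`; a generator function plus optional
# gen_kwargs is all that is needed.
#
#     def gen(shards):
#         for shard in shards:
#             yield {'''id''': shard}
#     ds = Dataset.from_generator(gen, gen_kwargs={'''shards''': [0, 1, 2]})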
| 654 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = ""
__lowerCAmelCase : Union[str, Any] = ""
__lowerCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase = random_chars(3_2 )
__UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
__UpperCAmelCase = []
for anno in new_annos[index]:
__UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase__ )
with open(f"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
for label_file in glob.glob(os.path.join(UpperCamelCase__ , '''*.txt''' ) ):
__UpperCAmelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(UpperCamelCase__ ) as in_file:
__UpperCAmelCase = in_file.readlines()
__UpperCAmelCase = os.path.join(UpperCamelCase__ , f"""{label_name}.jpg""" )
__UpperCAmelCase = []
for obj_list in obj_lists:
__UpperCAmelCase = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(UpperCamelCase__ )
labels.append(UpperCamelCase__ )
return img_paths, labels
def lowerCAmelCase ( UpperCamelCase__ : list , UpperCamelCase__ : list , UpperCamelCase__ : int = 1 ):
"""simple docstring"""
__UpperCAmelCase = []
__UpperCAmelCase = []
__UpperCAmelCase = []
for idx in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = []
__UpperCAmelCase = img_list[idx]
path_list.append(UpperCamelCase__ )
__UpperCAmelCase = anno_list[idx]
__UpperCAmelCase = cva.imread(UpperCamelCase__ )
if flip_type == 1:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
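            # A horizontal flip mirrors each normalized YOLO x-centre: x_new = 1 - x_old.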
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__UpperCAmelCase = cva.flip(UpperCamelCase__ , UpperCamelCase__ )
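            # A vertical flip mirrors each normalized YOLO y-centre: y_new = 1 - y_old.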
for bbox in img_annos:
__UpperCAmelCase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(UpperCamelCase__ )
new_imgs_list.append(UpperCamelCase__ )
return new_imgs_list, new_annos_lists, path_list
def lowerCAmelCase ( UpperCamelCase__ : int = 3_2 ):
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
__UpperCAmelCase = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase__ ) for _ in range(UpperCamelCase__ ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 654 | 1 |
'''simple docstring'''
import requests
__lowerCAmelCase : Dict = "YOUR API KEY"
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str = giphy_api_key ):
"""simple docstring"""
__UpperCAmelCase = '''+'''.join(query.split() )
__UpperCAmelCase = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
__UpperCAmelCase = requests.get(UpperCamelCase__ ).json()['''data''']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 654 |
'''simple docstring'''
from pathlib import Path
import fire
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = Path(UpperCamelCase__ )
__UpperCAmelCase = Path(UpperCamelCase__ )
dest_dir.mkdir(exist_ok=UpperCamelCase__ )
for path in src_dir.iterdir():
__UpperCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
__UpperCAmelCase = dest_dir.joinpath(path.name )
print(UpperCamelCase__ )
dest_path.open('''w''' ).write('''\n'''.join(UpperCamelCase__ ) )
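# Example invocation (assuming the function is exposed to fire as `minify`):
#   python minify.py data/full data/mini 100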
if __name__ == "__main__":
fire.Fire(minify)
| 654 | 1 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class A ( unittest.TestCase ):
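    # Covers zero capacity (with and without items), a small 3-item case, and the
    # classic 60/100/120 example with capacity 50.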
def snake_case__ ( self : int ) -> Tuple:
__UpperCAmelCase = 0
__UpperCAmelCase = [0]
__UpperCAmelCase = [0]
__UpperCAmelCase = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 0 )
__UpperCAmelCase = [6_0]
__UpperCAmelCase = [1_0]
__UpperCAmelCase = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 0 )
def snake_case__ ( self : str ) -> Tuple:
__UpperCAmelCase = 3
__UpperCAmelCase = [1, 2, 3]
__UpperCAmelCase = [3, 2, 1]
__UpperCAmelCase = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 5 )
def snake_case__ ( self : Tuple ) -> List[str]:
__UpperCAmelCase = 5_0
__UpperCAmelCase = [6_0, 1_0_0, 1_2_0]
__UpperCAmelCase = [1_0, 2_0, 3_0]
__UpperCAmelCase = len(__a )
self.assertEqual(k.knapsack(__a , __a , __a , __a ) , 2_2_0 )
if __name__ == "__main__":
unittest.main()
| 654 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase__ )
if number < 1:
__UpperCAmelCase = f"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCamelCase__ )
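    # Iteratively apply the Catalan recurrence C(i) = C(i-1) * (4*i - 2) // (i + 1).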
__UpperCAmelCase = 1
for i in range(1 , UpperCamelCase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 1 |
'''simple docstring'''
from torch import nn
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
| 654 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
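# Tiny 3 -> 4 -> 5 network (with a BatchNorm in between), used only to allocate
# some GPU memory in the tests below.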
class A ( nn.Module ):
def __init__( self : Optional[Any] ) -> int:
super().__init__()
__UpperCAmelCase = nn.Linear(3 , 4 )
__UpperCAmelCase = nn.BatchNormad(4 )
__UpperCAmelCase = nn.Linear(4 , 5 )
def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' )
self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self : Any ) -> int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__a : Optional[int] ):
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : Any ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(__a ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self : Any ) -> List[Any]:
__UpperCAmelCase = torch.cuda.memory_allocated()
__UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __a )
__UpperCAmelCase = release_memory(__a )
self.assertEqual(torch.cuda.memory_allocated() , __a )
| 654 | 1 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int = 5_0 ):
"""simple docstring"""
__UpperCAmelCase = [1] * (length + 1)
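    # ways_number[k] counts the tilings of a length-k row: either all unit squares
    # (the initial 1) or a first long tile of length 2..4 at some start position,
    # followed by any tiling of the remainder (a Project Euler 117 style count; assumption).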
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 654 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = u
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = temp * (u - i)
return temp
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = int(input('''enter the numbers of values: ''' ) )
__UpperCAmelCase = []
for _ in range(UpperCamelCase__ ):
y.append([] )
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
y[i].append(UpperCamelCase__ )
__UpperCAmelCase = 0
print('''enter the values of parameters in a list: ''' )
__UpperCAmelCase = list(map(UpperCamelCase__ , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(UpperCamelCase__ ):
__UpperCAmelCase = float(input() )
__UpperCAmelCase = int(input('''enter the value to interpolate: ''' ) )
__UpperCAmelCase = (value - x[0]) / (x[1] - x[0])
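    # u = (x - x0) / h, which assumes equally spaced sample points.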
# for calculating forward difference table
for i in range(1 , UpperCamelCase__ ):
for j in range(n - i ):
__UpperCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
__UpperCAmelCase = y[0][0]
for i in range(1 , UpperCamelCase__ ):
summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 654 | 1 |
'''simple docstring'''
import math
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = input('''Enter message: ''' )
__UpperCAmelCase = int(input(f"""Enter key [2-{len(UpperCamelCase__ ) - 1}]: """ ) )
__UpperCAmelCase = input('''Encryption/Decryption [e/d]: ''' )
if mode.lower().startswith('''e''' ):
__UpperCAmelCase = encrypt_message(UpperCamelCase__ , UpperCamelCase__ )
elif mode.lower().startswith('''d''' ):
__UpperCAmelCase = decrypt_message(UpperCamelCase__ , UpperCamelCase__ )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(f"""Output:\n{text + '|'}""" )
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = [''''''] * key
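    # Columnar transposition: character i of the message lands in column i % key.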
for col in range(UpperCamelCase__ ):
__UpperCAmelCase = col
while pointer < len(UpperCamelCase__ ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = math.ceil(len(UpperCamelCase__ ) / key )
__UpperCAmelCase = key
__UpperCAmelCase = (num_cols * num_rows) - len(UpperCamelCase__ )
__UpperCAmelCase = [''''''] * num_cols
__UpperCAmelCase = 0
__UpperCAmelCase = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
__UpperCAmelCase = 0
row += 1
return "".join(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 654 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
__UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = dct.pop(UpperCamelCase__ )
__UpperCAmelCase = val
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ )
__UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = 4_0_9_6
__UpperCAmelCase = 2_4
__UpperCAmelCase = 1_6
__UpperCAmelCase = 1_0_2_4
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = False
__UpperCAmelCase = '''relu'''
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
# load HuggingFace model
__UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ )
__UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ )
__UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model''']
__UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase = val
else:
__UpperCAmelCase = val
# load state dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
__UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 654 | 1 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
__lowerCAmelCase : List[Any] = "base_with_context"
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
__UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCamelCase__ )
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCAmelCase = weights[f"""layers_{lyr_num}"""]
__UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
__UpperCAmelCase = ly_weight['''attention''']
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def lowerCAmelCase ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCamelCase__ )
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCAmelCase = weights[f"""layers_{lyr_num}"""]
__UpperCAmelCase = ly_weight['''attention''']
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=UpperCamelCase__ )
__UpperCAmelCase = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__UpperCAmelCase = weights[f"""layers_{lyr_num}"""]
__UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
__UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
__UpperCAmelCase = ly_weight['''self_attention''']
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__UpperCAmelCase = ly_weight['''MultiHeadDotProductAttention_0''']
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
__UpperCAmelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
__UpperCAmelCase = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def lowerCAmelCase ( UpperCamelCase__ : List[Any] ):
"""simple docstring"""
__UpperCAmelCase = checkpoints.load_tax_checkpoint(args.checkpoint_path )
__UpperCAmelCase = jnp.tree_util.tree_map(onp.array , UpperCamelCase__ )
__UpperCAmelCase = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
__UpperCAmelCase = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
__UpperCAmelCase = inference.parse_training_gin_file(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = inference.InferenceModel(args.checkpoint_path , UpperCamelCase__ )
__UpperCAmelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
__UpperCAmelCase = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
__UpperCAmelCase = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
__UpperCAmelCase = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
__UpperCAmelCase = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , UpperCamelCase__ )
__UpperCAmelCase = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , UpperCamelCase__ )
__UpperCAmelCase = load_decoder(ta_checkpoint['''target''']['''decoder'''] , UpperCamelCase__ )
__UpperCAmelCase = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
__UpperCAmelCase = SpectrogramDiffusionPipeline(
notes_encoder=UpperCamelCase__ , continuous_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ , scheduler=UpperCamelCase__ , melgan=UpperCamelCase__ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
__lowerCAmelCase : str = parser.parse_args()
main(args)
| 654 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]:
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy"""
def snake_case__ ( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return image
def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = '''bf16''' if fpaa else None
__UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
__a , subfolder='''unet''' , dtype=__a , revision=__a )
return model, params
def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return hidden_states
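    # Each parameterized case below is (seed, timestep, expected output slice).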
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
| 654 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
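# Lazy import structure: the heavy torch / speech modules below are only imported
# when the corresponding optional dependency is available.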
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowerCAmelCase : Optional[int] = "examples/"
__lowerCAmelCase : Dict = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
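# Each entry maps a file kind to (regex locating the version string, replacement template).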
__lowerCAmelCase : List[str] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
__lowerCAmelCase : int = "README.md"
def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ):
"""simple docstring"""
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase , __UpperCAmelCase = REPLACE_PATTERNS[pattern]
__UpperCAmelCase = replace.replace('''VERSION''' , UpperCamelCase__ )
__UpperCAmelCase = re_pattern.sub(UpperCamelCase__ , UpperCamelCase__ )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(UpperCamelCase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ , pattern='''examples''' )
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Any=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not patch:
update_version_in_examples(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = '''🤗 Transformers currently provides the following architectures'''
__UpperCAmelCase = '''1. Want to contribute a new model?'''
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
# Find the start of the list.
__UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
__UpperCAmelCase = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(UpperCamelCase__ )
def lowerCAmelCase ( ):
"""simple docstring"""
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
__UpperCAmelCase = f.read()
__UpperCAmelCase = REPLACE_PATTERNS['''init'''][0].search(UpperCamelCase__ ).groups()[0]
return packaging.version.parse(UpperCamelCase__ )
def lowerCAmelCase ( UpperCamelCase__ : Any=False ):
"""simple docstring"""
__UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
__UpperCAmelCase = default_version.base_version
elif patch:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
__UpperCAmelCase = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
__UpperCAmelCase = input(f"""Which version are you releasing? [{default_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = default_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ , patch=UpperCamelCase__ )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = get_version()
__UpperCAmelCase = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
__UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
__UpperCAmelCase = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(UpperCamelCase__ ) == 0:
__UpperCAmelCase = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(UpperCamelCase__ )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
__lowerCAmelCase : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 654 | 1 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowerCAmelCase : int = "docs/source/en/_toctree.yml"
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = defaultdict(UpperCamelCase__ )
for doc in model_doc:
counts[doc["local"]] += 1
__UpperCAmelCase = [key for key, value in counts.items() if value > 1]
__UpperCAmelCase = []
for duplicate_key in duplicates:
__UpperCAmelCase = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(UpperCamelCase__ ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
    return sorted(UpperCamelCase__ , key=lambda s : s["title"].lower() )
def lowerCAmelCase ( UpperCamelCase__ : Any=False ):
"""simple docstring"""
with open(UpperCamelCase__ , encoding='''utf-8''' ) as f:
__UpperCAmelCase = yaml.safe_load(f.read() )
# Get to the API doc
__UpperCAmelCase = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__UpperCAmelCase = content[api_idx]['''sections''']
# Then to the model doc
__UpperCAmelCase = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
__UpperCAmelCase = api_doc[model_idx]['''sections''']
__UpperCAmelCase = [(idx, section) for idx, section in enumerate(UpperCamelCase__ ) if '''sections''' in section]
__UpperCAmelCase = False
for idx, modality_doc in modalities_docs:
__UpperCAmelCase = modality_doc['''sections''']
__UpperCAmelCase = clean_model_doc_toc(UpperCamelCase__ )
if old_modality_doc != new_modality_doc:
__UpperCAmelCase = True
if overwrite:
__UpperCAmelCase = new_modality_doc
if diff:
if overwrite:
__UpperCAmelCase = model_doc
__UpperCAmelCase = api_doc
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
__lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__lowerCAmelCase : Dict = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 654 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
# if the collection is empty, returns empty
if collection == []:
return []
# get some information about the collection
__UpperCAmelCase = len(UpperCamelCase__ )
__UpperCAmelCase = max(UpperCamelCase__ )
__UpperCAmelCase = min(UpperCamelCase__ )
# create the counting array
__UpperCAmelCase = coll_max + 1 - coll_min
__UpperCAmelCase = [0] * counting_arr_length
    # count how many times each number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. Now counting_arr[i] tells
    # us how many elements <= i are in the collection
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__UpperCAmelCase = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort), going from end to beginning and updating counting_arr
for i in reversed(range(0 , UpperCamelCase__ ) ):
__UpperCAmelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__lowerCAmelCase : str = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 654 | 1 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__lowerCAmelCase : int = logging.getLogger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = np.argmax(UpperCamelCase__ , axis=1 )
return np.sum(outputs == labels )
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
with open(UpperCamelCase__ , encoding='''utf_8''' ) as f:
__UpperCAmelCase = csv.reader(UpperCamelCase__ )
__UpperCAmelCase = []
next(UpperCamelCase__ ) # skip the first line
for line in tqdm(UpperCamelCase__ ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
__UpperCAmelCase = []
for dataset in encoded_datasets:
__UpperCAmelCase = len(UpperCamelCase__ )
__UpperCAmelCase = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
__UpperCAmelCase = np.zeros((n_batch, 2) , dtype=np.intaa )
__UpperCAmelCase = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
__UpperCAmelCase = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(UpperCamelCase__ ):
__UpperCAmelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__UpperCAmelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
__UpperCAmelCase = with_conta
__UpperCAmelCase = with_conta
__UpperCAmelCase = len(UpperCamelCase__ ) - 1
__UpperCAmelCase = len(UpperCamelCase__ ) - 1
__UpperCAmelCase = with_conta
__UpperCAmelCase = with_conta
__UpperCAmelCase = mc_label
__UpperCAmelCase = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t ) for t in all_inputs ) )
return tensor_datasets
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=UpperCamelCase__ , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=UpperCamelCase__ , default='''''' )
parser.add_argument('''--eval_dataset''' , type=UpperCamelCase__ , default='''''' )
parser.add_argument('''--seed''' , type=UpperCamelCase__ , default=4_2 )
parser.add_argument('''--num_train_epochs''' , type=UpperCamelCase__ , default=3 )
parser.add_argument('''--train_batch_size''' , type=UpperCamelCase__ , default=8 )
parser.add_argument('''--eval_batch_size''' , type=UpperCamelCase__ , default=1_6 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=UpperCamelCase__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=UpperCamelCase__ , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=UpperCamelCase__ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=UpperCamelCase__ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=UpperCamelCase__ , default=6.2_5E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=UpperCamelCase__ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=UpperCamelCase__ , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=UpperCamelCase__ , default=0.01 )
parser.add_argument('''--lm_coef''' , type=UpperCamelCase__ , default=0.9 )
parser.add_argument('''--n_valid''' , type=UpperCamelCase__ , default=3_7_4 )
parser.add_argument('''--server_ip''' , type=UpperCamelCase__ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=UpperCamelCase__ , default='''''' , help='''Can be used for distant debugging.''' )
__UpperCAmelCase = parser.parse_args()
print(UpperCamelCase__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__UpperCAmelCase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__UpperCAmelCase = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(UpperCamelCase__ , UpperCamelCase__ ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__UpperCAmelCase = ['''_start_''', '''_delimiter_''', '''_classify_''']
__UpperCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(UpperCamelCase__ )
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
__UpperCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
model.to(UpperCamelCase__ )
# Load and encode the datasets
def tokenize_and_encode(UpperCamelCase__ : Union[str, Any] ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(UpperCamelCase__ ) )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return obj
        return [tokenize_and_encode(o ) for o in obj]
logger.info('''Encoding dataset...''' )
__UpperCAmelCase = load_rocstories_dataset(args.train_dataset )
__UpperCAmelCase = load_rocstories_dataset(args.eval_dataset )
__UpperCAmelCase = (train_dataset, eval_dataset)
__UpperCAmelCase = tokenize_and_encode(UpperCamelCase__ )
# Compute the max input length for the Transformer
__UpperCAmelCase = model.config.n_positions // 2 - 2
__UpperCAmelCase = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
__UpperCAmelCase = min(UpperCamelCase__ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__UpperCAmelCase = pre_process_datasets(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ )
__UpperCAmelCase , __UpperCAmelCase = tensor_datasets[0], tensor_datasets[1]
__UpperCAmelCase = TensorDataset(*UpperCamelCase__ )
__UpperCAmelCase = RandomSampler(UpperCamelCase__ )
__UpperCAmelCase = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.train_batch_size )
__UpperCAmelCase = TensorDataset(*UpperCamelCase__ )
__UpperCAmelCase = SequentialSampler(UpperCamelCase__ )
__UpperCAmelCase = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__UpperCAmelCase = args.max_steps
__UpperCAmelCase = args.max_steps // (len(UpperCamelCase__ ) // args.gradient_accumulation_steps) + 1
else:
__UpperCAmelCase = len(UpperCamelCase__ ) // args.gradient_accumulation_steps * args.num_train_epochs
__UpperCAmelCase = list(model.named_parameters() )
__UpperCAmelCase = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__UpperCAmelCase = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
__UpperCAmelCase = AdamW(UpperCamelCase__ , lr=args.learning_rate , eps=args.adam_epsilon )
__UpperCAmelCase = get_linear_schedule_with_warmup(
UpperCamelCase__ , num_warmup_steps=args.warmup_steps , num_training_steps=UpperCamelCase__ )
if args.do_train:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = tqdm(UpperCamelCase__ , desc='''Training''' )
for step, batch in enumerate(UpperCamelCase__ ):
__UpperCAmelCase = tuple(t.to(UpperCamelCase__ ) for t in batch )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = batch
__UpperCAmelCase = model(UpperCamelCase__ , mc_token_ids=UpperCamelCase__ , lm_labels=UpperCamelCase__ , mc_labels=UpperCamelCase__ )
__UpperCAmelCase = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__UpperCAmelCase = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__UpperCAmelCase = '''Training loss: {:.2e} lr: {:.2e}'''.format(UpperCamelCase__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__UpperCAmelCase = model.module if hasattr(UpperCamelCase__ , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__UpperCAmelCase = os.path.join(args.output_dir , UpperCamelCase__ )
__UpperCAmelCase = os.path.join(args.output_dir , UpperCamelCase__ )
torch.save(model_to_save.state_dict() , UpperCamelCase__ )
model_to_save.config.to_json_file(UpperCamelCase__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__UpperCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__UpperCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(UpperCamelCase__ )
if args.do_eval:
    model.eval()
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(device) for t in batch)
        input_ids, mc_token_ids, lm_labels, mc_labels = batch
        with torch.no_grad():
            _, mc_loss, _, mc_logits = model(
                input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
            )
        mc_logits = mc_logits.detach().cpu().numpy()
        mc_labels = mc_labels.to("cpu").numpy()
        tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

        eval_loss += mc_loss.mean().item()
        eval_accuracy += tmp_eval_accuracy

        nb_eval_examples += input_ids.size(0)
        nb_eval_steps += 1

    eval_loss = eval_loss / nb_eval_steps
    eval_accuracy = eval_accuracy / nb_eval_examples
    train_loss = tr_loss / nb_tr_steps if args.do_train else None
    result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

    output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 654 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    """
    Return the current price of the given stock symbol, scraped from Yahoo Finance.
    """
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
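# NOTE: Yahoo's page markup changes frequently; if `stock_price` starts raising
# AttributeError, the CSS class above most likely needs to be refreshed.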
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 654 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        # The model is exercised with both dict and list input formats.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
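# The tester above deliberately uses tiny dimensions (2 layers, hidden size 32,
# vocabulary 99) so the shape checks build and run in seconds on CPU.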
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
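# The 1e-4 absolute tolerance above leaves headroom for small numerical
# differences across TF builds and hardware backends.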
| 654 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """
    Calculate the waiting time of each process.
    """
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
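# Note: although `remaining_time` is tracked, a selected process runs to
# completion before the ready queue is rebuilt, so this variant behaves as
# non-preemptive shortest-job-first rather than true shortest-remaining-time.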
def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """
    Calculate the turnaround time of each process: burst time plus waiting time.
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 654 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """
    Build the list of (old, new) pairs mapping timm parameter names to HF names.
    """
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """
    Split each fused timm qkv projection into separate HF query/key/value entries.
    (The HF key names below follow the DeiT naming used in the original conversion script.)
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
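# (timm stores the query/key/value projections as one fused `qkv` matrix; the
# slices above split it into the three separate projections used by the HF
# implementation.)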
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    # We will verify our results on an image of cute cats.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
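# The COCO image above is the standard smoke-test input used across the HF
# conversion scripts; any reasonably sized RGB image would do.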
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the timm model's weights into our DeiT structure.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {deit_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 654 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
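# The classes below all follow the standard dummy-object pattern: a placeholder
# that raises a clear "requires torch" error on instantiation, `from_config`, or
# `from_pretrained`. A minimal sketch of the canonical form (the class name here
# is hypothetical; in the original file each dummy carries a real class's name):
#
#     class SomeTorchBackedClass(metaclass=DummyObject):
#         _backends = ["torch"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch"])
#
#         @classmethod
#         def from_config(cls, *args, **kwargs):
#             requires_backends(cls, ["torch"])
#
#         @classmethod
#         def from_pretrained(cls, *args, **kwargs):
#             requires_backends(cls, ["torch"])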
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[str] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : List[str] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : List[Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Optional[Any] , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Tuple , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : str , **__a : Tuple ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : int ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : List[str] , **__a : Optional[int] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Any ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Optional[int] , **__a : Optional[int] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[str] , **__a : List[str] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[int] , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Tuple , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : str ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Any ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : str , **__a : Tuple ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[str] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Optional[int] , **__a : Union[str, Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Union[str, Any] , **__a : Any ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : int , **__a : int ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : str ) -> Dict:
requires_backends(cls , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Any , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : str , **UpperCamelCase__ : str ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Dict , **UpperCamelCase__ : Dict ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Tuple , **UpperCamelCase__ : int ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
def lowerCAmelCase ( *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
requires_backends(UpperCamelCase__ , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : str , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : str , **__a : List[str] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : List[Any] , **__a : List[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : Tuple ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : str , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : str ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : int , **__a : Tuple ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Tuple , **__a : str ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : str , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : int , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : str , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : int , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Optional[Any] , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Dict ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : List[str] , **__a : Union[str, Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : int , **__a : Dict ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Tuple , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : str , **__a : Any ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Optional[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : str , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Optional[Any] , **__a : List[str] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Union[str, Any] , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : Optional[int] , **__a : List[Any] ) -> Any:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Any , **__a : str ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[Any] , **__a : int ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : int , **__a : Optional[Any] ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ) -> List[str]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : Tuple , **__a : Optional[int] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[Any] , **__a : int ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Tuple , **__a : int ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : Tuple ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Union[str, Any] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[Any] , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : Optional[int] , **__a : int ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Union[str, Any] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : int , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Any , **__a : int ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : List[Any] , **__a : Dict ) -> int:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[Any] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Tuple , **__a : List[Any] ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Dict , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Any , **__a : Dict ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : Tuple , **__a : Optional[int] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Optional[Any] , **__a : Optional[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[Any] , **__a : Dict ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Union[str, Any] , **__a : Optional[int] ) -> Dict:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : Union[str, Any] , **__a : List[str] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Optional[int] , **__a : List[Any] ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Any , *__a : Dict , **__a : int ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Optional[int] , **__a : Union[str, Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : int ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Optional[Any] , **__a : int ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[Any] , **__a : Optional[int] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Optional[int] ) -> Tuple:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[int] , *__a : Optional[int] , **__a : List[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Dict , **__a : Tuple ) -> Tuple:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[Any] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : List[str] , **__a : int ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : int , *__a : Tuple , **__a : Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : Any , **__a : List[str] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : int , **__a : int ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : Any , **__a : List[Any] ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : str ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[str] , **__a : int ) -> Union[str, Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Optional[int] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : str ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : List[Any] , **__a : Union[str, Any] ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Union[str, Any] , *__a : List[Any] , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Any ) -> int:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Optional[Any] , *__a : List[str] , **__a : Any ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Any , *__a : List[str] , **__a : Dict ) -> List[str]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Dict , **__a : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : str , **__a : Any ) -> Dict:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Dict , **__a : Tuple ) -> Optional[int]:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : str , *__a : Any , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : int , *__a : Any , **__a : Optional[Any] ) -> int:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Dict , *__a : int , **__a : List[Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[str] , *__a : Dict , **__a : Tuple ) -> Union[str, Any]:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Tuple , *__a : int , **__a : Optional[Any] ) -> List[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> str:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : Tuple , *__a : Any , **__a : Optional[int] ) -> str:
requires_backends(cls , ['''torch'''] )
class A ( metaclass=UpperCAmelCase ):
a_ = ['''torch''']
def __init__( self : Dict , *__a : List[str] , **__a : List[Any] ) -> Optional[Any]:
requires_backends(self , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : int , **__a : Any ) -> Any:
requires_backends(cls , ['''torch'''] )
@classmethod
def snake_case__ ( cls : List[Any] , *__a : List[str] , **__a : List[str] ) -> List[Any]:
requires_backends(cls , ['''torch'''] )
| 654 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier):
    """
    Split a camel-cased name into its words.
    """
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
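# For example, camel_case_split("TFDistilBertModel") yields
# ["TF", "Distil", "Bert", "Model"], which lets the backend lookup below strip
# one trailing word at a time.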
def get_frameworks_table():
    """
    Generate a dataframe recording, for each model type, which of the PT/TF/Flax
    frameworks implements it, plus the preprocessor class to use.
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to pick the right preprocessor class for each model type.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """
    Update the table mapping model classes to (pipeline tag, auto class) pairs.
    """
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    """
    Update the metadata for the Transformers repo.
    """
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"""Update with commit {commit_sha}\n\nSee: """
                f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """
    Check that every supported pipeline task appears in `PIPELINE_TAGS_AND_AUTO_MODELS`.
    """
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"""`utils/update_metadata.py`: {msg}. Please add them!"""
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 654 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 654 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for the semantic Stable Diffusion pipeline.

    Args:
        images: List of denoised PIL images or a numpy array of shape `(batch_size, height, width, num_channels)`.
        nsfw_content_detected: List of flags denoting whether the corresponding image likely represents
            "not-safe-for-work" (nsfw) content, or `None` if safety checking could not be performed.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
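# `_import_structure` maps each submodule to its public names; the heavier entries
# below are only registered when their optional dependencies are importable, and
# nothing is actually imported until first attribute access via `_LazyModule`.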
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 | 1 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : list[int] ):
"""simple docstring"""
__UpperCAmelCase = []
if len(UpperCamelCase__ ) == 1:
return [nums.copy()]
for _ in range(len(UpperCamelCase__ ) ):
__UpperCAmelCase = nums.pop(0 )
__UpperCAmelCase = permute(UpperCamelCase__ )
for perm in permutations:
perm.append(UpperCamelCase__ )
result.extend(UpperCamelCase__ )
nums.append(UpperCamelCase__ )
return result
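# Complexity note: the recursive variant above pops the head element, permutes the
# remaining n-1 items, and appends the popped element to every sub-permutation, so
# both this and the backtracking variant below enumerate all n! orderings in
# O(n * n!) time.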
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
def backtrack(UpperCamelCase__ : str ):
if start == len(UpperCamelCase__ ) - 1:
output.append(nums[:] )
else:
for i in range(UpperCamelCase__ , len(UpperCamelCase__ ) ):
__UpperCAmelCase , __UpperCAmelCase = nums[i], nums[start]
backtrack(start + 1 )
__UpperCAmelCase , __UpperCAmelCase = nums[i], nums[start] # backtrack
__UpperCAmelCase = []
backtrack(0 )
return output
if __name__ == "__main__":
import doctest
    # use res to print the data returned by the permute2 function
__lowerCAmelCase : Dict = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 654 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , ):
"""simple docstring"""
__UpperCAmelCase = {}
if train_file is not None:
__UpperCAmelCase = [train_file]
if eval_file is not None:
__UpperCAmelCase = [eval_file]
if test_file is not None:
__UpperCAmelCase = [test_file]
__UpperCAmelCase = datasets.load_dataset('''csv''' , data_files=UpperCamelCase__ )
__UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
__UpperCAmelCase = features_name.pop(UpperCamelCase__ )
__UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
__UpperCAmelCase = {label: i for i, label in enumerate(UpperCamelCase__ )}
__UpperCAmelCase = tokenizer.model_input_names
__UpperCAmelCase = {}
if len(UpperCamelCase__ ) == 1:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' ) , batched=UpperCamelCase__ , )
elif len(UpperCamelCase__ ) == 2:
for k in files.keys():
__UpperCAmelCase = ds[k].map(
lambda UpperCamelCase__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , padding='''max_length''' , ) , batched=UpperCamelCase__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
__UpperCAmelCase = labelaid[ex[label_name]]
yield (d, label)
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__UpperCAmelCase = (
tf.data.Dataset.from_generator(
UpperCamelCase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
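# get_tfds hands back (train_ds, val_ds, test_ds, labelaid): tf.data generators with
# their exact cardinality asserted, plus the label-to-index mapping later passed to
# the model config; any split missing from the input files comes back as None.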
__lowerCAmelCase : List[Any] = logging.getLogger(__name__)
@dataclass
class A :
a_ = field(metadata={'''help''': '''Which column contains the label'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the training file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the development file'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''The path of the test file'''} )
a_ = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
a_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
a_ = field(default=UpperCAmelCase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a_ = field(
default=UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=UpperCamelCase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(UpperCamelCase__ ) , labelaid=UpperCamelCase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
def compute_metrics(UpperCamelCase__ : EvalPrediction ) -> Dict:
__UpperCAmelCase = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__UpperCAmelCase = TFTrainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__UpperCAmelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCAmelCase = trainer.evaluate()
__UpperCAmelCase = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(UpperCamelCase__ )
return results
if __name__ == "__main__":
main()
| 654 | 1 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : Tuple ):
"""simple docstring"""
    # if the collection is empty, return an empty list
if collection == []:
return []
# get some information about the collection
__UpperCAmelCase = len(UpperCamelCase__ )
__UpperCAmelCase = max(UpperCamelCase__ )
__UpperCAmelCase = min(UpperCamelCase__ )
# create the counting array
__UpperCAmelCase = coll_max + 1 - coll_min
__UpperCAmelCase = [0] * counting_arr_length
# count how much a number appears in the collection
for number in collection:
counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. Now, counting_arr[i] tells
    # us how many elements <= i are in the collection
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = counting_arr[i] + counting_arr[i - 1]
# create the output collection
__UpperCAmelCase = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to beginning, updating counting_arr
for i in reversed(range(0 , UpperCamelCase__ ) ):
__UpperCAmelCase = collection[i]
counting_arr[collection[i] - coll_min] -= 1
return ordered
def lowerCAmelCase ( UpperCamelCase__ : Any ):
"""simple docstring"""
return "".join([chr(UpperCamelCase__ ) for i in counting_sort([ord(UpperCamelCase__ ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
__lowerCAmelCase : str = input("Enter numbers separated by a comma:\n").strip()
__lowerCAmelCase : List[Any] = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 654 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__( self : List[Any] , __a : Any , ) -> Dict:
__UpperCAmelCase = parent
__UpperCAmelCase = 1_3
__UpperCAmelCase = 7
__UpperCAmelCase = True
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = 9_9
__UpperCAmelCase = 3_2
__UpperCAmelCase = 2
__UpperCAmelCase = 4
__UpperCAmelCase = 3_7
__UpperCAmelCase = '''gelu'''
__UpperCAmelCase = 0.1
__UpperCAmelCase = 0.1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = 1_6
__UpperCAmelCase = 2
__UpperCAmelCase = 0.0_2
__UpperCAmelCase = 3
__UpperCAmelCase = 4
__UpperCAmelCase = None
def snake_case__ ( self : Optional[int] ) -> Dict:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : int , __a : Union[str, Any] , __a : Union[str, Any] , __a : List[Any] , __a : int ) -> Any:
__UpperCAmelCase = TFDistilBertModel(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
__UpperCAmelCase = [input_ids, input_mask]
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self : Tuple , __a : List[Any] , __a : int , __a : Tuple , __a : List[Any] , __a : Union[str, Any] , __a : List[Any] ) -> int:
__UpperCAmelCase = TFDistilBertForMaskedLM(config=__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case__ ( self : Optional[int] , __a : Any , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Optional[int] ) -> Dict:
__UpperCAmelCase = TFDistilBertForQuestionAnswering(config=__a )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case__ ( self : Any , __a : Optional[Any] , __a : List[str] , __a : Dict , __a : Dict , __a : int , __a : List[Any] ) -> Dict:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForSequenceClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : Union[str, Any] , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[int] , __a : List[str] , __a : Dict ) -> str:
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = TFDistilBertForMultipleChoice(__a )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
__UpperCAmelCase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case__ ( self : int , __a : Optional[Any] , __a : int , __a : Tuple , __a : int , __a : Optional[int] , __a : Optional[int] ) -> int:
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = TFDistilBertForTokenClassification(__a )
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase = self.prepare_config_and_inputs()
((__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase) , (__UpperCAmelCase)) = config_and_inputs
__UpperCAmelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
a_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def snake_case__ ( self : Any ) -> Any:
__UpperCAmelCase = TFDistilBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__a , dim=3_7 )
def snake_case__ ( self : List[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def snake_case__ ( self : Any ) -> str:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def snake_case__ ( self : Tuple ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def snake_case__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def snake_case__ ( self : Any ) -> int:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
def snake_case__ ( self : List[str] ) -> List[Any]:
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
@slow
def snake_case__ ( self : Dict ) -> Tuple:
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
__UpperCAmelCase = TFDistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_tf
class A ( unittest.TestCase ):
@slow
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCAmelCase = model(__a )[0]
__UpperCAmelCase = [1, 6, 7_6_8]
self.assertEqual(output.shape , __a )
__UpperCAmelCase = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
| 654 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class A ( UpperCAmelCase ):
a_ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 654 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : List[Any] = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 654 | 1 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = StableDiffusionControlNetImgaImgPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
a_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self : Any ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
__UpperCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__UpperCAmelCase = CLIPTextModel(__a )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self : int , __a : Optional[Any] , __a : Tuple=0 ) -> List[str]:
if str(__a ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(__a )
else:
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__UpperCAmelCase = 2
__UpperCAmelCase = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , )
__UpperCAmelCase = floats_tensor(control_image.shape , rng=random.Random(__a ) ).to(__a )
__UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase = Image.fromarray(np.uinta(__a ) ).convert('''RGB''' ).resize((6_4, 6_4) )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def snake_case__ ( self : List[Any] ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def snake_case__ ( self : Dict ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def snake_case__ ( self : str ) -> Dict:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class A ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = StableDiffusionControlNetImgaImgPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
a_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a_ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def snake_case__ ( self : Tuple ) -> Union[str, Any]:
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
def init_weights(__a : int ):
            if isinstance(__a , torch.nn.Convad ):
                torch.nn.init.normal(__a.weight )
                __a.bias.data.fill_(1.0 )
__UpperCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(__a )
torch.manual_seed(0 )
__UpperCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(__a )
torch.manual_seed(0 )
__UpperCAmelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__UpperCAmelCase = CLIPTextModel(__a )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = MultiControlNetModel([controlneta, controlneta] )
__UpperCAmelCase = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self : Union[str, Any] , __a : List[str] , __a : Dict=0 ) -> Optional[int]:
if str(__a ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(__a )
else:
__UpperCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__UpperCAmelCase = 2
__UpperCAmelCase = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , ),
]
__UpperCAmelCase = floats_tensor(control_image[0].shape , rng=random.Random(__a ) ).to(__a )
__UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase = Image.fromarray(np.uinta(__a ) ).convert('''RGB''' ).resize((6_4, 6_4) )
__UpperCAmelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def snake_case__ ( self : Union[str, Any] ) -> int:
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**__a )
pipe.to(__a )
__UpperCAmelCase = 1_0.0
__UpperCAmelCase = 4
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = steps
__UpperCAmelCase = scale
__UpperCAmelCase = pipe(**__a )[0]
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = steps
__UpperCAmelCase = scale
__UpperCAmelCase = pipe(**__a , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = steps
__UpperCAmelCase = scale
__UpperCAmelCase = pipe(**__a , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = steps
__UpperCAmelCase = scale
__UpperCAmelCase = pipe(**__a , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def snake_case__ ( self : Dict ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def snake_case__ ( self : int ) -> Tuple:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def snake_case__ ( self : Optional[Any] ) -> str:
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__a )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def snake_case__ ( self : Tuple ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' )
__UpperCAmelCase = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , safety_checker=__a , controlnet=__a )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase = '''evil space-punk bird'''
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((5_1_2, 5_1_2) )
__UpperCAmelCase = load_image(
'''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((5_1_2, 5_1_2) )
__UpperCAmelCase = pipe(
__a , __a , control_image=__a , generator=__a , output_type='''np''' , num_inference_steps=5_0 , strength=0.6 , )
__UpperCAmelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' )
assert np.abs(expected_image - image ).max() < 9e-2
| 654 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class A ( UpperCAmelCase ):
a_ = '''bert-generation'''
def __init__( self : str , __a : str=5_0_3_5_8 , __a : int=1_0_2_4 , __a : Optional[Any]=2_4 , __a : Any=1_6 , __a : int=4_0_9_6 , __a : Any="gelu" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Union[str, Any]=5_1_2 , __a : int=0.0_2 , __a : str=1e-12 , __a : List[str]=0 , __a : Optional[int]=2 , __a : Tuple=1 , __a : str="absolute" , __a : Optional[Any]=True , **__a : Tuple , ) -> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = hidden_act
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = initializer_range
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = position_embedding_type
__UpperCAmelCase = use_cache
| 654 | 1 |
'''simple docstring'''
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hang in `barrier` calls, you have some network issue; you may try to debug it with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowerCAmelCase ( *UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
with open(UpperCamelCase__ , '''r''' ) as fh:
fcntl.flock(UpperCamelCase__ , fcntl.LOCK_EX )
try:
print(*UpperCamelCase__ )
finally:
fcntl.flock(UpperCamelCase__ , fcntl.LOCK_UN )
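# printflock serializes prints across processes: each rank takes an exclusive lock
# on the file handle before printing and releases it afterwards, so multi-gpu
# output does not interleave mid-line.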
__lowerCAmelCase : Tuple = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
__lowerCAmelCase : Union[str, Any] = torch.device("cuda", local_rank)
__lowerCAmelCase : str = socket.gethostname()
__lowerCAmelCase : int = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
__lowerCAmelCase : Union[str, Any] = dist.get_rank()
__lowerCAmelCase : int = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
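# A healthy run prints one "is OK (global rank: rank/world_size)" line per process,
# followed by a single pt/cuda/nccl version line from global rank 0; a hang inside
# dist.barrier() usually points at a network/NCCL problem (see NCCL_DEBUG=INFO above).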
| 654 |
'''simple docstring'''
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
__lowerCAmelCase : str = 299_792_458
# Symbols
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = symbols("ct x y z")
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
if velocity > c:
raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError('''Speed must be greater than or equal to 1!''' )
return velocity / c
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return 1 / sqrt(1 - beta(UpperCamelCase__ ) ** 2 )
def lowerCAmelCase ( UpperCamelCase__ : float ):
"""simple docstring"""
return np.array(
[
[gamma(UpperCamelCase__ ), -gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), 0, 0],
[-gamma(UpperCamelCase__ ) * beta(UpperCamelCase__ ), gamma(UpperCamelCase__ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : np.ndarray | None = None ):
"""simple docstring"""
# Ensure event is not empty
if event is None:
__UpperCAmelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(UpperCamelCase__ ) @ event
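# The matrix built above is the standard Lorentz boost along the x-axis:
#   ct' = gamma * (ct - beta * x),  x' = gamma * (x - beta * ct),  y' = y,  z' = z
# with beta = v / c and gamma = 1 / sqrt(1 - beta**2), matching beta() and gamma() above.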
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
__lowerCAmelCase : Dict = transform(29_979_245)
print("Example of four vector: ")
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
__lowerCAmelCase : Union[str, Any] = {ct: c, x: 1, y: 1, z: 1}
__lowerCAmelCase : Optional[int] = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 654 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
    # The slow tests often fail with an OOM error on GPU.
    # This makes JAX allocate exactly what is needed on demand and deallocate memory that is no
    # longer needed, but it will be slower, as stated here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__lowerCAmelCase : str = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , ):
"""simple docstring"""
if attention_mask is None:
__UpperCAmelCase = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__UpperCAmelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__UpperCAmelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCAmelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class A :
def __init__( self : Dict , __a : Any , __a : List[str]=1_3 , __a : List[str]=7 , __a : List[str]=True , __a : List[Any]=False , __a : List[Any]=9_9 , __a : Any=1_6 , __a : Any=2 , __a : Dict=4 , __a : Optional[int]=4 , __a : List[str]="gelu" , __a : Optional[Any]=0.1 , __a : List[str]=0.1 , __a : Optional[int]=3_2 , __a : Union[str, Any]=2 , __a : Dict=1 , __a : Union[str, Any]=0 , __a : int=0.0_2 , ) -> Union[str, Any]:
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = eos_token_id
__UpperCAmelCase = pad_token_id
__UpperCAmelCase = bos_token_id
__UpperCAmelCase = initializer_range
def snake_case__ ( self : Dict ) -> Optional[int]:
__UpperCAmelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__UpperCAmelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__UpperCAmelCase = shift_tokens_right(__a , 1 , 2 )
__UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__a , )
__UpperCAmelCase = prepare_blenderbot_inputs_dict(__a , __a , __a )
return config, inputs_dict
def snake_case__ ( self : Tuple ) -> str:
__UpperCAmelCase , __UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case__ ( self : str , __a : Optional[Any] , __a : Dict , __a : Union[str, Any] ) -> Tuple:
__UpperCAmelCase = 2_0
__UpperCAmelCase = model_class_name(__a )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
__UpperCAmelCase , __UpperCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a )
__UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
__UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __a , decoder_attention_mask=__a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__a , )
__UpperCAmelCase = model.decode(__a , __a )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def snake_case__ ( self : Optional[Any] , __a : str , __a : List[str] , __a : str ) -> Union[str, Any]:
__UpperCAmelCase = 2_0
__UpperCAmelCase = model_class_name(__a )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] )
__UpperCAmelCase , __UpperCAmelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __a , __a )
__UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __a , decoder_attention_mask=__a , past_key_values=__a , decoder_position_ids=__a , )
__UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__a , decoder_position_ids=__a , )
__UpperCAmelCase = model.decode(__a , __a , decoder_attention_mask=__a )
__UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class A ( unittest.TestCase ):
a_ = 9_9
def snake_case__ ( self : Optional[Any] ) -> Dict:
__UpperCAmelCase = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
__UpperCAmelCase = input_ids.shape[0]
__UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def snake_case__ ( self : str ) -> Optional[Any]:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_config_and_data()
__UpperCAmelCase = FlaxBlenderbotForConditionalGeneration(__a )
__UpperCAmelCase = lm_model(input_ids=__a )
__UpperCAmelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , __a )
def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
__UpperCAmelCase = FlaxBlenderbotForConditionalGeneration(__a )
__UpperCAmelCase = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
__UpperCAmelCase = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
__UpperCAmelCase = lm_model(input_ids=__a , decoder_input_ids=__a )
__UpperCAmelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , __a )
def snake_case__ ( self : int ) -> List[Any]:
__UpperCAmelCase = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
__UpperCAmelCase = shift_tokens_right(__a , 1 , 2 )
__UpperCAmelCase = np.equal(__a , 1 ).astype(np.floataa ).sum()
__UpperCAmelCase = np.equal(__a , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(__a , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class A ( UpperCAmelCase , unittest.TestCase , UpperCAmelCase ):
a_ = True
a_ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
a_ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def snake_case__ ( self : Optional[Any] ) -> int:
__UpperCAmelCase = FlaxBlenderbotModelTester(self )
def snake_case__ ( self : Union[str, Any] ) -> str:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__a , __a , __a )
def snake_case__ ( self : int ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__a , __a , __a )
def snake_case__ ( self : Any ) -> Tuple:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase = self._prepare_for_class(__a , __a )
__UpperCAmelCase = model_class(__a )
@jax.jit
def encode_jitted(__a : List[Any] , __a : Any=None , **__a : Optional[Any] ):
return model.encode(input_ids=__a , attention_mask=__a )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase = encode_jitted(**__a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase = encode_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self : str ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase = model_class(__a )
__UpperCAmelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__UpperCAmelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(__a : Tuple , __a : Tuple , __a : Optional[Any] ):
return model.decode(
decoder_input_ids=__a , decoder_attention_mask=__a , encoder_outputs=__a , )
with self.subTest('''JIT Enabled''' ):
__UpperCAmelCase = decode_jitted(**__a ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__UpperCAmelCase = decode_jitted(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) )
for jitted_output, output in zip(__a , __a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case__ ( self : List[Any] ) -> Optional[int]:
for model_class_name in self.all_model_classes:
__UpperCAmelCase = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__UpperCAmelCase = np.ones((1, 1) ) * model.config.eos_token_id
__UpperCAmelCase = model(__a )
self.assertIsNotNone(__a )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def snake_case__ ( self : Dict ) -> str:
__UpperCAmelCase = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5}
__UpperCAmelCase = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
__UpperCAmelCase = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=__a )
__UpperCAmelCase = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
__UpperCAmelCase = ['''Sam''']
__UpperCAmelCase = tokenizer(__a , return_tensors='''jax''' )
__UpperCAmelCase = model.generate(**__a , **__a )
__UpperCAmelCase = '''Sam is a great name. It means "sun" in Gaelic.'''
__UpperCAmelCase = tokenizer.batch_decode(__a , **__a )
assert generated_txt[0].strip() == tgt_text
| 654 |
'''simple docstring'''
import heapq
import sys
import numpy as np
__lowerCAmelCase : Any = tuple[int, int]
class A :
def __init__( self : Optional[int] ) -> int:
__UpperCAmelCase = []
__UpperCAmelCase = set()
def snake_case__ ( self : Optional[Any] ) -> List[Any]:
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def snake_case__ ( self : Dict ) -> Optional[int]:
return len(self.elements ) == 0
def snake_case__ ( self : Optional[int] , __a : Optional[Any] , __a : Dict ) -> Optional[Any]:
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__a )
else:
# update
# print("update", item)
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
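        # Updating an existing item's priority pops entries until the item is found,
        # then pushes everything back, so put() is O(n log n) in the worst case;
        # acceptable for the small grids this search runs on.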
def snake_case__ ( self : int , __a : Any ) -> int:
if item in self.set:
self.set.remove(__a )
__UpperCAmelCase = []
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def snake_case__ ( self : List[str] ) -> Dict:
return self.elements[0][1]
def snake_case__ ( self : Any ) -> List[str]:
((__UpperCAmelCase) , (__UpperCAmelCase)) = heapq.heappop(self.elements )
self.set.remove(__a )
return (priority, item)
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# euclidean distance
__UpperCAmelCase = np.array(UpperCamelCase__ )
__UpperCAmelCase = np.array(UpperCamelCase__ )
return np.linalg.norm(a - b )
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# integer division by time variable
return consistent_heuristic(UpperCamelCase__ , UpperCamelCase__ ) // t
def lowerCAmelCase ( UpperCamelCase__ : TPos , UpperCamelCase__ : TPos ):
"""simple docstring"""
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + Wa * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    # Draw the grid, print the path that was found, and exit.
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= Wa * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )


def make_common_ground():
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1),
    (1, 1),
    (2, 1),
    (3, 1),
    (4, 1),
    (5, 1),
    (6, 1),
    (7, 1),
    (8, 1),
    (9, 1),
    (10, 1),
    (11, 1),
    (12, 1),
    (13, 1),
    (14, 1),
    (15, 1),
    (16, 1),
    (17, 1),
    (18, 1),
    (19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters (the reference implementation uses two weights W1 and W2;
# both are 1 here, so a single Wa is used throughout)
Wa = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list = []
    close_list_inad: list = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
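# Added sanity check (illustrative): the anchor queue orders states by
# g(s) + Wa * h0(s, goal); at the start cell g = 0, so its initial key is just
# the straight-line distance to the goal, sqrt(2) * 19 for this 20x20 grid.
assert abs(key(start, 0, goal, {start: 0, goal: float("inf")}) - 19 * 2**0.5) < 1e-6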
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 654 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
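# --- Added usage sketch (not part of the original module) ---
# CANINE has no learned vocabulary: tokens are raw Unicode codepoints, so the
# id mapping is just ord()/chr() plus the private-use specials above.
#
#   tokenizer = CanineTokenizer()
#   ids = tokenizer("hello")["input_ids"]
#   # -> [CLS, 104, 101, 108, 108, 111, SEP]
#   assert ids[1:-1] == [ord(c) for c in "hello"]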
| 654 |
'''simple docstring'''
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str):
    """Split a camel-cased name into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
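# Added example (illustrative): camel_case_split breaks a class name into its
# camel-cased words, which get_frameworks_table below uses to strip one word
# at a time off the end of a name.
assert camel_case_split("TFBertForSequenceClassification") == ["TF", "Bert", "For", "Sequence", "Classification"]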
def get_frameworks_table():
    """Generate a dataframe flagging, for each model type, which frameworks support it."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the table mapping model classes to (pipeline tag, auto class) pairs."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table


def update_metadata(token, commit_sha):
    """Update the metadata for the Transformers repo."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )


def check_pipeline_tags():
    """Check that all pipeline tags are registered in `PIPELINE_TAGS_AND_AUTO_MODELS`."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 654 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    """Map an original checkpoint key to the corresponding Transformers key."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
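# Added example (illustrative): a typical original checkpoint key and the
# Transformers key it maps to.
assert rename_key("layers.0.blocks.1.modulation.f.weight") == (
    "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"
)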
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the checkpoint's weights into our FocalNet structure."""
    # fmt: off
__UpperCAmelCase = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__UpperCAmelCase = torch.tensor([0.21_66, -0.43_68, 0.21_91] )
elif model_name == "focalnet-tiny-lrf":
__UpperCAmelCase = torch.tensor([1.16_69, 0.01_25, -0.16_95] )
elif model_name == "focalnet-small":
__UpperCAmelCase = torch.tensor([0.49_17, -0.04_30, 0.13_41] )
elif model_name == "focalnet-small-lrf":
__UpperCAmelCase = torch.tensor([-0.25_88, -0.53_42, -0.23_31] )
elif model_name == "focalnet-base":
__UpperCAmelCase = torch.tensor([-0.16_55, -0.40_90, -0.17_30] )
elif model_name == "focalnet-base-lrf":
__UpperCAmelCase = torch.tensor([0.53_06, -0.04_83, -0.39_28] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
__lowerCAmelCase : Any = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
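# --- Added usage note (illustrative; the script filename is assumed) ---
# Typical invocation:
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub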
| 654 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    Checks that each `complete_*` script contains all of the information found
    in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
            " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
            " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
            " " * 20 + "\"epoch\": epoch,\n\n",
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
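    # --- Added illustrative sketch (not from the original test file) ---
    # Every test above shells out through `accelerate launch`; the config file
    # written in setUpClass can also be produced by hand:
    #
    #   from accelerate.utils import write_basic_config
    #   write_basic_config(save_location="/tmp/default_config.yml")
    #   # then: accelerate launch --config_file /tmp/default_config.yml <script.py>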
| 654 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """The predicted CLIP image embedding conditioned on the input hidden states."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the additional_embeddings and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
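if __name__ == "__main__":
    # Added demo (not part of the original module): the additive causal mask
    # built in __init__ above, shown on a tiny 4-token example. Positions may
    # only attend to themselves and earlier tokens once this is added to the
    # attention scores.
    demo_mask = torch.full([4, 4], -10000.0)
    demo_mask.triu_(1)
    assert demo_mask[0, 0] == 0.0 and demo_mask[0, 1] == -10000.0
    assert (demo_mask.tril() == 0).all()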
| 654 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Folder paths and flip mode; fill these in before running.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
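# Added worked example (illustrative): annotations are normalized YOLO rows
# (class, x_center, y_center, width, height), so a horizontal flip only needs
# x_center -> 1 - x_center (and a vertical flip y_center -> 1 - y_center):
#   [0, 0.25, 0.40, 0.10, 0.20]  --horizontal-->  [0, 0.75, 0.40, 0.10, 0.20]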
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 654 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
torch.manual_seed(0 )
__UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
__UpperCAmelCase = PNDMScheduler(skip_prk_steps=__a )
torch.manual_seed(0 )
__UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__UpperCAmelCase = CLIPTextModel(__a )
__UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__a )
__UpperCAmelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = sd_pipe(**__a ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__a )
__UpperCAmelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = '''french fries'''
__UpperCAmelCase = sd_pipe(**__a , negative_prompt=__a )
__UpperCAmelCase = output.images
__UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCAmelCase = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__a )
__UpperCAmelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = [inputs['''prompt''']] * 2
__UpperCAmelCase = np.array(inputs['''image'''] ).astype(np.floataa ) / 2_5_5.0
__UpperCAmelCase = torch.from_numpy(__a ).unsqueeze(0 ).to(__a )
__UpperCAmelCase = image / 2 + 0.5
__UpperCAmelCase = image.permute(0 , 3 , 1 , 2 )
__UpperCAmelCase = image.repeat(2 , 1 , 1 , 1 )
__UpperCAmelCase = sd_pipe(**__a ).images
__UpperCAmelCase = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
__UpperCAmelCase = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
__UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' )
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__a )
__UpperCAmelCase = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = self.get_dummy_inputs(__a )
__UpperCAmelCase = sd_pipe(**__a ).images
__UpperCAmelCase = image[0, -3:, -3:, -1]
        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCAmelCase = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_latents_input(self):
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__a )
__UpperCAmelCase = VaeImageProcessor(do_resize=__a , do_normalize=__a )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__UpperCAmelCase = pipe(**self.get_dummy_inputs_by_type(__a , input_image_type='''pt''' ) )[0]
__UpperCAmelCase = components['''vae''']
__UpperCAmelCase = self.get_dummy_inputs_by_type(__a , input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
__UpperCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode()
__UpperCAmelCase = pipe(**__a )[0]
__UpperCAmelCase = np.abs(out - out_latents_inputs ).max()
self.assertLess(__a , 1e-4 , '''passing latents as image input generate different result from passing image''' )
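# --- Added illustrative note (not from the original test file) ---
# The batched test above converts a HWC uint8-style array into the BCHW float
# tensor the pipeline consumes; a common variant of that round trip is:
#
#   arr = np.array(pil_image).astype(np.float32) / 255.0  # HWC in [0, 1]
#   t = torch.from_numpy(arr).unsqueeze(0).permute(0, 3, 1, 2)  # B, C, H, W
#   t = t * 2 - 1  # many diffusion pipelines expect pixel values in [-1, 1]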
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
def snake_case__ ( self : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__UpperCAmelCase = self.get_inputs()
__UpperCAmelCase = pipe(**__a ).images
__UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCAmelCase = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case__ ( self : Any ) -> List[str]:
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__a )
__UpperCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__UpperCAmelCase = self.get_inputs()
__UpperCAmelCase = pipe(**__a ).images
__UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCAmelCase = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case__ ( self : Tuple ) -> Tuple:
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__a )
__UpperCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__UpperCAmelCase = self.get_inputs()
__UpperCAmelCase = pipe(**__a ).images
__UpperCAmelCase = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__UpperCAmelCase = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def snake_case__ ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase = 0
def callback_fn(__a : int , __a : int , __a : torch.FloatTensor ) -> None:
__UpperCAmelCase = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
__UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__UpperCAmelCase = latents[0, -3:, -3:, -1]
__UpperCAmelCase = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
__UpperCAmelCase = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
__UpperCAmelCase = latents[0, -3:, -3:, -1]
__UpperCAmelCase = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
__UpperCAmelCase = False
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__a , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__UpperCAmelCase = self.get_inputs()
pipe(**__a , callback=__a , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def snake_case__ ( self : Any ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''' , safety_checker=__a , torch_dtype=torch.floataa )
__UpperCAmelCase = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase = self.get_inputs()
__UpperCAmelCase = pipe(**__a )
__UpperCAmelCase = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def snake_case__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
__UpperCAmelCase = inputs['''image'''].resize((5_0_4, 5_0_4) )
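        # 504 = 8 * 63: the VAE downsamples by a factor of 8, so this size
        # divides evenly at that stage even though it is not a multiple of
        # 16 or 32 (editorial note, not part of the original test).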
__UpperCAmelCase = '''timbrooks/instruct-pix2pix'''
__UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__a , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
__UpperCAmelCase = pipe(**__a )
__UpperCAmelCase = output.images[0]
__UpperCAmelCase = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
__UpperCAmelCase = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 654 |
'''simple docstring'''
from pathlib import Path
import fire
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = Path(UpperCamelCase__ )
__UpperCAmelCase = Path(UpperCamelCase__ )
dest_dir.mkdir(exist_ok=UpperCamelCase__ )
for path in src_dir.iterdir():
__UpperCAmelCase = [x.rstrip() for x in list(path.open().readlines() )][:n]
__UpperCAmelCase = dest_dir.joinpath(path.name )
print(UpperCamelCase__ )
dest_path.open('''w''' ).write('''\n'''.join(UpperCamelCase__ ) )
if __name__ == "__main__":
fire.Fire(minify)
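# Example invocation (hypothetical script name and paths): keep the first
# 5 lines of every file in ./data and write the truncated copies to
# ./data_mini:
#
#     python minify.py ./data ./data_mini 5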
| 654 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCAmelCase ( UpperCamelCase__ : list[list[int]] ):
"""simple docstring"""
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(UpperCamelCase__ ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(UpperCamelCase__ ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
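# Worked example (hand-checked, not from the original file): for the input
# [[2, 1], [3, 1]] the first row becomes [2, 3], the first column [2, 5],
# and the final cell is 1 + min(3, 5) = 4, the cheapest top-left to
# bottom-right path cost.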
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__UpperCAmelCase = f"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase__ )
if number < 1:
__UpperCAmelCase = f"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCamelCase__ )
__UpperCAmelCase = 1
for i in range(1 , UpperCamelCase__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
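# Sanity check (hand-computed): the loop applies the Catalan recurrence
# C(n) = C(n-1) * (4n - 2) / (n + 1), so for number = 5 the running value
# goes 1 -> 1 -> 2 -> 5 -> 14 and the function returns 14.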
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 1 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class A ( unittest.TestCase ):
a_ = JukeboxTokenizer
a_ = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
def snake_case__ ( self : Optional[int] ) -> Any:
import torch
__UpperCAmelCase = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
__UpperCAmelCase = tokenizer(**self.metas )['''input_ids''']
# fmt: off
__UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7,
7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2,
4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3,
4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5,
3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5,
4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6,
4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1,
7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3,
7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9,
6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0,
3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8,
2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5,
3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5,
2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4,
4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9,
4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4,
7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1,
3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7,
4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6,
4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9,
3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7,
4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9,
3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8,
3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1,
4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1,
3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1,
7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9,
4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4,
4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6,
4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5,
4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9,
4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6,
4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9,
2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3,
7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6,
7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6,
4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4,
7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6,
2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6,
3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6,
4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7,
4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1,
7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6,
4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7,
3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7,
4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8,
2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0,
7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5,
7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4,
7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6,
7_6, 7_6]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def snake_case__ ( self : List[str] ) -> Optional[Any]:
import torch
__UpperCAmelCase = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
__UpperCAmelCase = tokenizer(**self.metas )['''input_ids''']
# fmt: off
__UpperCAmelCase = [
torch.tensor([[
0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9,
3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8,
3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7,
4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4,
7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1,
7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8,
2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0,
3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1,
3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0,
7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3,
7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7,
4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1,
7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7,
7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0,
7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5,
6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9,
4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1,
4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7,
3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1,
3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9,
4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7,
4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6,
4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5,
3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4,
3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7,
4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2,
3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7,
3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5,
4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4,
2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4,
3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7,
3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2,
3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2,
3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1,
4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2,
3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7,
1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7,
1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3,
4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2,
4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1,
4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4,
4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2,
2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5,
3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3,
7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0,
3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7,
7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8,
4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4,
7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7,
4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1,
7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5,
2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4,
7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7,
7_7, 7_7, 7_7, 7_7, 7_7, 7_7]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 654 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
"""simple docstring"""
raise RuntimeError('''CUDA out of memory.''' )
class A ( nn.Module ):
def __init__( self : Optional[Any] ) -> int:
super().__init__()
__UpperCAmelCase = nn.Linear(3 , 4 )
__UpperCAmelCase = nn.BatchNormad(4 )
__UpperCAmelCase = nn.Linear(4 , 5 )
def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]:
return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A ( unittest.TestCase ):
def snake_case__ ( self : Optional[int] ) -> Any:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : Union[str, Any] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
def snake_case__ ( self : str ) -> int:
__UpperCAmelCase = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Optional[int] ):
nonlocal batch_sizes
batch_sizes.append(__a )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
__UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' )
        self.assertListEqual(batch_sizes , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, '''hello'''] )
def snake_case__ ( self : Any ) -> int:
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__a : Optional[int] ):
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : Any ) -> List[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Dict ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
def snake_case__ ( self : List[Any] ) -> List[str]:
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__a ) as cm:
mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
def snake_case__ ( self : Tuple ) -> Optional[Any]:
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__a : Tuple ):
raise ValueError('''Oops, we had an error!''' )
with self.assertRaises(__a ) as cm:
mock_training_loop_function()
self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
@require_cuda
def snake_case__ ( self : Any ) -> List[Any]:
__UpperCAmelCase = torch.cuda.memory_allocated()
__UpperCAmelCase = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __a )
__UpperCAmelCase = release_memory(__a )
self.assertEqual(torch.cuda.memory_allocated() , __a )
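# The behaviour exercised above, in isolation (a minimal sketch, not part of
# the original test file): the decorator retries the wrapped function and
# halves batch_size after every out-of-memory failure, which is why the
# tests expect the sequence [128, 64, 32, 16, 8] when starting from 128.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # raises on OOM until batch_size fits, then runs normally
#
#     train()  # called without batch_size; the decorator injects it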
| 654 | 1 |
'''simple docstring'''
def lowerCAmelCase ( UpperCamelCase__ : int ):
"""simple docstring"""
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
__UpperCAmelCase = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
__UpperCAmelCase = 1
if upper_limit > 0:
__UpperCAmelCase = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
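# Example (hand-checked): catalan_numbers(5) returns [1, 1, 2, 5, 14, 42];
# e.g. C(5) = C0*C4 + C1*C3 + C2*C2 + C3*C1 + C4*C0
#           = 14 + 5 + 4 + 5 + 14 = 42.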
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
__lowerCAmelCase : int = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(F"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| 654 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase = u
for i in range(1 , UpperCamelCase__ ):
__UpperCAmelCase = temp * (u - i)
return temp
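# Note: ucal(u, i) evaluates the falling product u * (u - 1) * ... * (u - i + 1),
# the coefficient of the i-th forward difference in Newton's forward
# interpolation formula applied in main():
#     f(x) ~ y[0][0] + sum over i >= 1 of ucal(u, i) * y[0][i] / i!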
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = int(input('''enter the numbers of values: ''' ) )
__UpperCAmelCase = []
    for _ in range(n ):
        y.append([] )
    for i in range(n ):
        for j in range(n ):
            y[i].append(j )
__UpperCAmelCase = 0
print('''enter the values of parameters in a list: ''' )
__UpperCAmelCase = list(map(UpperCamelCase__ , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
__UpperCAmelCase = float(input() )
__UpperCAmelCase = int(input('''enter the value to interpolate: ''' ) )
__UpperCAmelCase = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
    for i in range(1 , n ):
for j in range(n - i ):
__UpperCAmelCase = y[j + 1][i - 1] - y[j][i - 1]
__UpperCAmelCase = y[0][0]
    for i in range(1 , n ):
        summ += (ucal(u , i ) * y[0][i]) / math.factorial(i )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 654 | 1 |
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__lowerCAmelCase : Tuple = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
__lowerCAmelCase : Any = 10
__lowerCAmelCase : Dict = 256
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
if len(UpperCamelCase__ ) < MIN_NUM_TOKENS:
return None
__UpperCAmelCase = MinHash(num_perm=UpperCamelCase__ )
for token in set(UpperCamelCase__ ):
min_hash.update(token.encode() )
return min_hash
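# MinHash sketching in brief: each document's token set is hashed under
# NUM_PERM = 256 permutations; two sets collide in the MinHashLSH index
# below with probability tied to their Jaccard similarity, which is what
# makes the near-duplicate lookup cheap.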
def lowerCAmelCase ( UpperCamelCase__ : str ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(UpperCamelCase__ ) if len(t.strip() ) > 0}
class A :
    def __init__( self : List[Any] , *, __a : float = 0.8_5 , ) -> Optional[int]:
__UpperCAmelCase = duplication_jaccard_threshold
__UpperCAmelCase = NUM_PERM
__UpperCAmelCase = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
__UpperCAmelCase = defaultdict(__a )
def snake_case__ ( self : Dict , __a : Tuple , __a : MinHash ) -> None:
__UpperCAmelCase = self._index.query(__a )
if code_key in self._index.keys:
print(f"""Duplicate key {code_key}""" )
return
self._index.insert(__a , __a )
if len(__a ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(__a )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(__a )
def snake_case__ ( self : str ) -> List[List[Dict]]:
__UpperCAmelCase = []
for base, duplicates in self._duplicate_clusters.items():
__UpperCAmelCase = [base] + list(__a )
# reformat the cluster to be a list of dict
__UpperCAmelCase = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
duplicate_clusters.append(__a )
return duplicate_clusters
def snake_case__ ( self : int , __a : str ) -> None:
__UpperCAmelCase = self.get_duplicate_clusters()
with open(__a , '''w''' ) as f:
json.dump(__a , __a )
def lowerCAmelCase ( UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase = element
__UpperCAmelCase = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def lowerCAmelCase ( UpperCamelCase__ : Type[Dataset] ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(UpperCamelCase__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def lowerCAmelCase ( UpperCamelCase__ : Type[Dataset] , UpperCamelCase__ : float ):
"""simple docstring"""
__UpperCAmelCase = DuplicationIndex(duplication_jaccard_threshold=UpperCamelCase__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(UpperCamelCase__ ) ) , max_queue_size=1_0_0 ) ):
di.add(UpperCamelCase__ , UpperCamelCase__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
__UpperCAmelCase = get_tokens(UpperCamelCase__ )
__UpperCAmelCase = get_tokens(UpperCamelCase__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
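# Quick illustration (hypothetical strings): "load_data(path)" and
# "load_data(file)" tokenize to {load_data, path} and {load_data, file},
# so their Jaccard similarity is 1 / 3.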
__lowerCAmelCase : str = None
def lowerCAmelCase ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
"""simple docstring"""
__UpperCAmelCase = []
for elementa in cluster:
__UpperCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
for elementa in extremes:
__UpperCAmelCase = _shared_dataset[elementa['''base_index''']]['''content''']
if jaccard_similarity(UpperCamelCase__ , UpperCamelCase__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
__UpperCAmelCase = 1
extremes.append(UpperCamelCase__ )
return extremes
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
global _shared_dataset
__UpperCAmelCase = dataset
__UpperCAmelCase = []
__UpperCAmelCase = partial(_find_cluster_extremes_shared , jaccard_threshold=UpperCamelCase__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
UpperCamelCase__ , UpperCamelCase__ , ) , total=len(UpperCamelCase__ ) , ):
extremes_list.append(UpperCamelCase__ )
return extremes_list
def lowerCAmelCase ( UpperCamelCase__ : Type[Dataset] , UpperCamelCase__ : float = 0.85 ):
"""simple docstring"""
__UpperCAmelCase = make_duplicate_clusters(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
__UpperCAmelCase = {}
__UpperCAmelCase = find_extremes(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for extremes in extremes_clusters:
for element in extremes:
__UpperCAmelCase = element
__UpperCAmelCase = duplicate_indices - set(extreme_dict.keys() )
__UpperCAmelCase = dataset.filter(lambda UpperCamelCase__ , UpperCamelCase__ : idx not in remove_indices , with_indices=UpperCamelCase__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__UpperCAmelCase = element['''base_index'''] in extreme_dict
if element["is_extreme"]:
__UpperCAmelCase = extreme_dict[element['''base_index''']]['''copies''']
print(f"""Original dataset size: {len(UpperCamelCase__ )}""" )
print(f"""Number of duplicate clusters: {len(UpperCamelCase__ )}""" )
print(f"""Files in duplicate cluster: {len(UpperCamelCase__ )}""" )
print(f"""Unique files in duplicate cluster: {len(UpperCamelCase__ )}""" )
print(f"""Filtered dataset size: {len(UpperCamelCase__ )}""" )
return ds_filter, duplicate_clusters
| 654 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase = state_dict.pop(f"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
__UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
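        # In words: the checkpoint stores one fused qkv projection of shape
        # (3 * hidden_size, hidden_size); the three equal slices above are the
        # query, key and value weight matrices, in that order.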
def lowerCAmelCase ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
__UpperCAmelCase = dct.pop(UpperCamelCase__ )
__UpperCAmelCase = val
def lowerCAmelCase ( UpperCamelCase__ : Dict ):
"""simple docstring"""
if "handwritten" in checkpoint_url:
__UpperCAmelCase = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCamelCase__ )
__UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = 4_0_9_6
__UpperCAmelCase = 2_4
__UpperCAmelCase = 1_6
__UpperCAmelCase = 1_0_2_4
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase = False
__UpperCAmelCase = '''relu'''
__UpperCAmelCase = 1_0_2_4
__UpperCAmelCase = True
__UpperCAmelCase = False
__UpperCAmelCase = False
# load HuggingFace model
__UpperCAmelCase = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ )
__UpperCAmelCase = TrOCRForCausalLM(UpperCamelCase__ )
__UpperCAmelCase = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='''cpu''' , check_hash=UpperCamelCase__ )['''model''']
__UpperCAmelCase = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase = state_dict.pop(UpperCamelCase__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase = val
else:
__UpperCAmelCase = val
# load state dict
model.load_state_dict(UpperCamelCase__ )
# Check outputs on an image
__UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase = TrOCRProcessor(UpperCamelCase__ , UpperCamelCase__ )
__UpperCAmelCase = processor(images=prepare_img(UpperCamelCase__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase = model(pixel_values=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ )
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , UpperCamelCase__ , atol=1E-3 ), "First elements of logits not as expected"
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase__ )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Any = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 654 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__lowerCAmelCase : Dict = "src/transformers"
__lowerCAmelCase : List[Any] = "docs/source/en"
__lowerCAmelCase : Tuple = "."
def lowerCAmelCase ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ):
"""simple docstring"""
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
# Find the start prompt.
__UpperCAmelCase = 0
while not lines[start_index].startswith(UpperCamelCase__ ):
start_index += 1
start_index += 1
__UpperCAmelCase = start_index
while not lines[end_index].startswith(UpperCamelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__lowerCAmelCase : int = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : Tuple = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : Tuple = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__lowerCAmelCase : List[Any] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : int = direct_transformers_import(TRANSFORMERS_PATH)
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase = re.finditer('''.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)''' , UpperCamelCase__ )
return [m.group(0 ) for m in matches]
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
__UpperCAmelCase = 2 if text == '''✅''' or text == '''❌''' else len(UpperCamelCase__ )
__UpperCAmelCase = (width - text_length) // 2
__UpperCAmelCase = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__UpperCAmelCase = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
__UpperCAmelCase = {name: config.replace('''Config''' , '''''' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
__UpperCAmelCase = collections.defaultdict(UpperCamelCase__ )
# Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
__UpperCAmelCase = None
if attr_name.endswith('''Tokenizer''' ):
__UpperCAmelCase = slow_tokenizers
__UpperCAmelCase = attr_name[:-9]
elif attr_name.endswith('''TokenizerFast''' ):
__UpperCAmelCase = fast_tokenizers
__UpperCAmelCase = attr_name[:-1_3]
        elif _re_tf_models.match(attr_name ) is not None:
            __UpperCAmelCase = tf_models
            __UpperCAmelCase = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            __UpperCAmelCase = flax_models
            __UpperCAmelCase = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            __UpperCAmelCase = pt_models
            __UpperCAmelCase = _re_pt_models.match(attr_name ).groups()[0]
if lookup_dict is not None:
            while len(attr_name ) > 0:
if attr_name in model_name_to_prefix.values():
__UpperCAmelCase = True
break
# Try again after removing the last word in the name
                __UpperCAmelCase = ''''''.join(camel_case_split(attr_name )[:-1] )
# Let's build that table!
__UpperCAmelCase = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
__UpperCAmelCase = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    __UpperCAmelCase = [len(c ) + 2 for c in columns]
    __UpperCAmelCase = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    __UpperCAmelCase = '''|''' + '''|'''.join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + '''|\n'''
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([''':''' + '''-''' * (w - 2) + ''':''' for w in widths] ) + "|\n"
__UpperCAmelCase = {True: '''✅''', False: '''❌'''}
for name in model_names:
__UpperCAmelCase = model_name_to_prefix[name]
__UpperCAmelCase = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(UpperCamelCase__ , UpperCamelCase__ ) for l, w in zip(UpperCamelCase__ , UpperCamelCase__ )] ) + "|\n"
return table
def lowerCAmelCase ( UpperCamelCase__ : Optional[Any]=False ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = _find_text_in_file(
filename=os.path.join(UpperCamelCase__ , '''index.md''' ) , start_prompt='''<!--This table is updated automatically from the auto modules''' , end_prompt='''<!-- End table-->''' , )
__UpperCAmelCase = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(UpperCamelCase__ , '''index.md''' ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'''The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.''' )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__lowerCAmelCase : Optional[Any] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 654 |
'''simple docstring'''
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A ( unittest.TestCase ):
def snake_case__ ( self : List[Any] , __a : List[str] , __a : Optional[Any] ) -> List[Any]:
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(__a ) for s in shape] )}.npy"""
def snake_case__ ( self : Dict ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case__ ( self : Optional[Any] , __a : Tuple=0 , __a : List[Any]=(4, 4, 6_4, 6_4) , __a : Optional[Any]=False ) -> Tuple:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return image
def snake_case__ ( self : int , __a : Optional[Any]=False , __a : Optional[Any]="CompVis/stable-diffusion-v1-4" ) -> Any:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = '''bf16''' if fpaa else None
__UpperCAmelCase , __UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained(
__a , subfolder='''unet''' , dtype=__a , revision=__a )
return model, params
def snake_case__ ( self : str , __a : int=0 , __a : Tuple=(4, 7_7, 7_6_8) , __a : Optional[int]=False ) -> Union[str, Any]:
__UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa
__UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(__a , __a ) ) , dtype=__a )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
[1_7, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
[8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
[3, 1_0_0_0, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
# fmt: on
] )
def snake_case__ ( self : Tuple , __a : Tuple , __a : str , __a : Optional[Any] ) -> Any:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
[1_7, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
[8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
[3, 1_0_0_0, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
# fmt: on
] )
def snake_case__ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase , __UpperCAmelCase = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__a )
__UpperCAmelCase = self.get_latents(__a , shape=(4, 4, 9_6, 9_6) , fpaa=__a )
__UpperCAmelCase = self.get_encoder_hidden_states(__a , shape=(4, 7_7, 1_0_2_4) , fpaa=__a )
__UpperCAmelCase = model.apply(
{'''params''': params} , __a , jnp.array(__a , dtype=jnp.intaa ) , encoder_hidden_states=__a , ).sample
assert sample.shape == latents.shape
__UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__UpperCAmelCase = jnp.array(__a , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__a , __a , atol=1e-2 )
| 654 | 1 |