| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| string (lengths 87 to 55.2k) | int64 (0 to 349) | string (lengths 135 to 49.1k) | int64 (0 to 349) | int64 (0 or 1) |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'''
_KWARGS_DESCRIPTION = '''\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'''
_CITATION = R'''\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # `scipy.stats.spearmanr` returns a (correlation, pvalue) tuple.
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 338 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals['_TRAINERSPEC']._serialized_start = 45
    _globals['_TRAINERSPEC']._serialized_end = 1581
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_start = 1517
    _globals['_TRAINERSPEC_MODELTYPE']._serialized_end = 1570
    _globals['_NORMALIZERSPEC']._serialized_start = 1584
    _globals['_NORMALIZERSPEC']._serialized_end = 1793
    _globals['_SELFTESTDATA']._serialized_start = 1795
    _globals['_SELFTESTDATA']._serialized_end = 1916
    _globals['_SELFTESTDATA_SAMPLE']._serialized_start = 1864
    _globals['_SELFTESTDATA_SAMPLE']._serialized_end = 1905
    _globals['_MODELPROTO']._serialized_start = 1919
    _globals['_MODELPROTO']._serialized_end = 2429
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_start = 2208
    _globals['_MODELPROTO_SENTENCEPIECE']._serialized_end = 2418
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start = 2323
    _globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
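# Hedged usage sketch: with the messages built above, the module exposes the usual
# protobuf API; `ModelProto`/`TrainerSpec` come from the schema in the serialized
# file, while the file path below is an assumption for illustration only.
#
#   m = ModelProto()
#   with open("spiece.model", "rb") as f:
#       m.ParseFromString(f.read())
#   print(m.trainer_spec.model_type, len(m.pieces))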
| 17 | 0 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
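# The tester below follows the standard transformers test pattern: it builds a tiny
# random-weight config plus matching dummy inputs, and each `create_and_check_*`
# method instantiates one head class and asserts only the output shapes.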
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 53 |
"""simple docstring"""
import baseaa
def _A ( UpperCamelCase_ : str) -> bytes:
'''simple docstring'''
return baseaa.baaencode(string.encode("utf-8"))
def _A ( UpperCamelCase_ : bytes) -> str:
'''simple docstring'''
return baseaa.baadecode(UpperCamelCase_).decode("utf-8")
if __name__ == "__main__":
_a = 'Hello World!'
_a = baseaa_encode(test)
print(encoded)
_a = baseaa_decode(encoded)
print(decoded)
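# Expected round-trip (hedged, assuming standard base64 as above):
#   base64_encode("Hello World!") == b"SGVsbG8gV29ybGQh"
#   base64_decode(b"SGVsbG8gV29ybGQh") == "Hello World!"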
| 17 | 0 |
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler 20 uses 100!)."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
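# Worked check: solution(10) -> 10! = 3628800, digit sum 3+6+2+8+8+0+0 = 27.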
| 59 |
"""simple docstring"""
def _A ( UpperCamelCase_ : Any) -> List[str]:
'''simple docstring'''
__lowercase ,__lowercase = [], []
while len(UpperCamelCase_) > 1:
__lowercase ,__lowercase = min(UpperCamelCase_), max(UpperCamelCase_)
start.append(UpperCamelCase_)
end.append(UpperCamelCase_)
collection.remove(UpperCamelCase_)
collection.remove(UpperCamelCase_)
end.reverse()
return start + collection + end
if __name__ == "__main__":
_a = input('Enter numbers separated by a comma:\n').strip()
_a = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
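# Example trace: merge_sort([5, 1, 4, 2, 3]) extracts min/max pairs (1, 5) then
# (2, 4), leaving [3] in the middle, so the result is [1, 2, 3, 4, 5].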
| 17 | 0 |
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours among `num_picked` balls drawn from the urn."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
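# Reasoning step: by linearity of expectation, each colour is absent from the draw
# with probability C(60, 20) / C(70, 20), so
# E[distinct colours] = 7 * (1 - C(60, 20) / C(70, 20)).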
| 13 |
"""simple docstring"""
def _A ( UpperCamelCase_ : list[int]) -> float:
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError("List is empty")
__lowercase = sum(UpperCamelCase_) / len(UpperCamelCase_) # Calculate the average
return sum(abs(x - average) for x in nums) / len(UpperCamelCase_)
if __name__ == "__main__":
import doctest
doctest.testmod()
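# Worked check: average_absolute_deviation([0, 0, 10, 10]) -> mean 5,
# deviations (5, 5, 5, 5) -> 5.0.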
| 17 | 0 |
import base64


def base64_encode(string: str) -> bytes:
    return base64.b64encode(string.encode('utf-8'))


def base64_decode(encoded: bytes) -> str:
    return base64.b64decode(encoded).decode('utf-8')


if __name__ == "__main__":
    test = 'Hello World!'
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
    print(decoded)
| 71 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
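# Hedged quick-start (outside the test suite): a tiny random-weight forward pass;
# the shapes follow the tester defaults above and nothing depends on a checkpoint.
#
#   config = BeitConfig(image_size=30, patch_size=2, num_channels=3, hidden_size=32,
#                       num_hidden_layers=5, num_attention_heads=4, intermediate_size=37)
#   model = FlaxBeitModel(config)
#   out = model(np.zeros((1, 3, 30, 30), dtype=np.float32))
#   # out.last_hidden_state.shape == (1, (30 // 2) ** 2 + 1, 32)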
| 17 | 0 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!")
    @slow
    def test_model_13b_chat_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor([input_ids])).logits
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is curently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
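# Hedged aside: the RoPE-scaling test above drives everything through
# `config.rope_scaling`; a standalone sketch with an assumed tiny config:
#
#   config = LlamaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2,
#                        num_attention_heads=4, intermediate_size=37)
#   config.rope_scaling = {"type": "linear", "factor": 10.0}
#   model = LlamaModel(config)  # position indices are now interpolated by 10x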
| 254 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class _lowerCAmelCase ( unittest.TestCase ,lowercase ):
"""simple docstring"""
def _lowercase ( self : List[Any] ):
__lowercase = load_tool("text-classification" )
self.tool.setup()
__lowercase = load_tool("text-classification", remote=UpperCAmelCase__ )
def _lowercase ( self : str ):
__lowercase = self.tool("That's quite cool", ["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : str ):
__lowercase = self.remote_tool("That's quite cool", ["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : List[str] ):
__lowercase = self.tool(text="That's quite cool", labels=["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
def _lowercase ( self : Tuple ):
__lowercase = self.remote_tool(text="That's quite cool", labels=["positive", "negative"] )
self.assertEqual(UpperCAmelCase__, "positive" )
| 17 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib


_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    """
    If the TRANSFORMERS_VERBOSITY env var is set to one of the valid choices, return that as the default level;
    otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, configuring the library root logger on first use."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """
    This method is identical to `logger.warning()`, but if the env var TRANSFORMERS_NO_ADVISORY_WARNINGS is set,
    the warning is not printed.
    """
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """
    This method is identical to `logger.warning()`, but emits the warning with the same message only once.
    """
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class UpperCamelCase_ :
def __init__( self , *A , **A ) -> Tuple: # pylint: disable=unused-argument
UpperCAmelCase : Tuple = args[0] if args else None
def __iter__( self ) -> Any:
return iter(self._iterator )
def __getattr__( self , A ) -> str:
def empty_fn(*A , **A ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> Optional[int]:
return self
def __exit__( self , A , A , A ) -> Optional[Any]:
return
class UpperCamelCase_ :
def __call__( self , *A , **A ) -> Optional[Any]:
if _tqdm_active:
return tqdm_lib.tqdm(*UpperCAmelCase__ , **UpperCAmelCase__ )
else:
return EmptyTqdm(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase( self , *A , **A ) -> Tuple:
UpperCAmelCase : Optional[int] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _lowercase( self ) -> Tuple:
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
a : List[Any] = _tqdm_cls()
def __lowerCamelCase ( ) -> bool:
global _tqdm_active
return bool(_tqdm_active )
def __lowerCamelCase ( ) -> Dict:
global _tqdm_active
UpperCAmelCase : List[Any] = True
hf_hub_utils.enable_progress_bars()
def __lowerCamelCase ( ) -> Tuple:
global _tqdm_active
UpperCAmelCase : Optional[Any] = False
hf_hub_utils.disable_progress_bars()
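# Usage sketch (illustrative; not part of this module). In the published
# `transformers` package these helpers are exposed under `transformers.logging`,
# so typical client code looks like:
#
#   import transformers
#   transformers.logging.set_verbosity_error()         # silence info/warning output
#   logger = transformers.logging.get_logger(__name__)
#   logger.warning_advice("hidden when TRANSFORMERS_NO_ADVISORY_WARNINGS is set")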
| 265 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """simple docstring"""
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def components(self):
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
| 17 | 0 |
"""simple docstring"""
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
if not all(char in '01' for char in bin_string ):
raise ValueError('Non-binary value was passed to the function' )
if not bin_string:
raise ValueError('Empty string was passed to the function' )
A__ = ''
while len(UpperCamelCase_ ) % 3 != 0:
A__ = '0' + bin_string
A__ = [
bin_string[index : index + 3]
for index in range(len(UpperCamelCase_ ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
A__ = 0
for index, val in enumerate(UpperCamelCase_ ):
oct_val += int(2 ** (2 - index) * int(UpperCamelCase_ ) )
oct_string += str(UpperCamelCase_ )
return oct_string
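# Worked example (illustrative): for bin_to_octal("1111") the string is left-padded
# to "001111" so its length is a multiple of 3, split into ["001", "111"], and each
# group is evaluated bitwise (0b001 -> 1, 0b111 -> 7), giving the octal string "17".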
if __name__ == "__main__":
from doctest import testmod
testmod()
| 221 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def _lowercase ( self : Any ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def _lowercase ( self : Optional[Any] ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : int ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : str ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def _lowercase ( self : Any ):
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider" )
__lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = self.get_dummy_inputs()
__lowercase = pipe(**UpperCAmelCase__ ).images
__lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : Tuple ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _lowercase ( self : Dict ):
__lowercase = ort.SessionOptions()
__lowercase = False
return options
def _lowercase ( self : Dict ):
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowercase = init_image.resize((1_2_8, 1_2_8) )
# using the PNDM scheduler by default
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A fantasy landscape, trending on artstation"
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=1_0, generator=UpperCAmelCase__, output_type="np", )
__lowercase = output.images
__lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowercase ( self : str ):
__lowercase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
__lowercase = init_image.resize((1_2_8, 1_2_8) )
__lowercase = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" )
__lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx", scheduler=UpperCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A fantasy landscape, trending on artstation"
__lowercase = torch.manual_seed(0 )
__lowercase = pipe(
prompt=UpperCAmelCase__, image=UpperCAmelCase__, guidance_scale=7.5, num_inference_steps=2_0, generator=UpperCAmelCase__, output_type="np", )
__lowercase = output.images
__lowercase = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__lowercase = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
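    # Pattern note (illustrative): the scheduler swaps in the tests above rely on the
    # standard diffusers idiom of rebuilding a compatible scheduler from the current
    # config, e.g.
    #   pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
    # which keeps the beta schedule and step settings while changing the sampler.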
| 17 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
__snake_case : Optional[Any] = FlaxDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ )
__snake_case : List[Any] = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase__ , os.listdir(UpperCAmelCase__ )[0] , "snapshots" ) )]
__snake_case : List[str] = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(".bin" ) for f in files )
@slow
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case , __snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=UpperCAmelCase__ )
__snake_case : Union[str, Any] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__snake_case : Dict = jax.random.PRNGKey(0 )
__snake_case : Union[str, Any] = 4
__snake_case : List[str] = jax.device_count()
__snake_case : Dict = num_samples * [prompt]
__snake_case : Optional[Any] = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__snake_case : int = replicate(UpperCAmelCase__ )
__snake_case : Optional[int] = jax.random.split(UpperCAmelCase__ , UpperCAmelCase__ )
__snake_case : Optional[int] = shard(UpperCAmelCase__ )
__snake_case : Dict = pipeline(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1E-3
assert np.abs(np.abs(UpperCAmelCase__ , dtype=np.floataa ).sum() - 49947.875 ) < 5E-1
__snake_case : Optional[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCAmelCase__ ) == num_samples
def UpperCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case , __snake_case : int = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=UpperCAmelCase__ )
__snake_case : str = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__snake_case : int = jax.random.PRNGKey(0 )
__snake_case : Union[str, Any] = 50
__snake_case : Tuple = jax.device_count()
__snake_case : List[Any] = num_samples * [prompt]
__snake_case : Tuple = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__snake_case : Dict = replicate(UpperCAmelCase__ )
__snake_case : Optional[Any] = jax.random.split(UpperCAmelCase__ , UpperCAmelCase__ )
__snake_case : Any = shard(UpperCAmelCase__ )
__snake_case : Dict = pipeline(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__ , dtype=np.floataa ).sum() - 2383808.2) ) < 5E-1
def UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
__snake_case , __snake_case : Dict = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase__ )
__snake_case : str = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__snake_case : str = jax.random.PRNGKey(0 )
__snake_case : Tuple = 50
__snake_case : str = jax.device_count()
__snake_case : Union[str, Any] = num_samples * [prompt]
__snake_case : int = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__snake_case : List[Any] = replicate(UpperCAmelCase__ )
__snake_case : Optional[int] = jax.random.split(UpperCAmelCase__ , UpperCAmelCase__ )
__snake_case : Dict = shard(UpperCAmelCase__ )
__snake_case : List[Any] = pipeline(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__ , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case , __snake_case : List[str] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa )
__snake_case : str = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__snake_case : Dict = jax.random.PRNGKey(0 )
__snake_case : Any = 50
__snake_case : Any = jax.device_count()
__snake_case : Optional[Any] = num_samples * [prompt]
__snake_case : Dict = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__snake_case : int = replicate(UpperCAmelCase__ )
__snake_case : Optional[Any] = jax.random.split(UpperCAmelCase__ , UpperCAmelCase__ )
__snake_case : Dict = shard(UpperCAmelCase__ )
__snake_case : Union[str, Any] = pipeline(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__ , dtype=np.floataa ).sum() - 2373516.75) ) < 5E-1
def UpperCAmelCase ( self ) -> int:
'''simple docstring'''
__snake_case : Any = FlaxDDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=UpperCAmelCase__ , steps_offset=1 , )
__snake_case , __snake_case : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ , )
__snake_case : Tuple = scheduler.create_state()
__snake_case : List[Any] = scheduler_state
__snake_case : List[str] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__snake_case : Any = jax.random.PRNGKey(0 )
__snake_case : Any = 50
__snake_case : Tuple = jax.device_count()
__snake_case : Tuple = num_samples * [prompt]
__snake_case : Optional[Any] = pipeline.prepare_inputs(UpperCAmelCase__ )
# shard inputs and rng
__snake_case : Tuple = replicate(UpperCAmelCase__ )
__snake_case : int = jax.random.split(UpperCAmelCase__ , UpperCAmelCase__ )
__snake_case : int = shard(UpperCAmelCase__ )
__snake_case : List[str] = pipeline(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1E-3
assert np.abs((np.abs(UpperCAmelCase__ , dtype=np.floataa ).sum() - 2347693.5) ) < 5E-1
def UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : Optional[int] = (
"A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
" field, close up, split lighting, cinematic"
)
__snake_case : List[str] = jax.device_count()
__snake_case : Optional[Any] = num_samples * [prompt]
__snake_case : int = jax.random.split(jax.random.PRNGKey(0 ) , UpperCAmelCase__ )
__snake_case , __snake_case : Dict = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase__ , )
__snake_case : int = replicate(UpperCAmelCase__ )
__snake_case : Optional[int] = pipeline.prepare_inputs(UpperCAmelCase__ )
__snake_case : Union[str, Any] = shard(UpperCAmelCase__ )
__snake_case : Optional[int] = pipeline(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , jit=UpperCAmelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
__snake_case : Optional[int] = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
__snake_case , __snake_case : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase__ , use_memory_efficient_attention=UpperCAmelCase__ , )
__snake_case : str = replicate(UpperCAmelCase__ )
__snake_case : Tuple = pipeline.prepare_inputs(UpperCAmelCase__ )
__snake_case : Dict = shard(UpperCAmelCase__ )
__snake_case : Dict = pipeline(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , jit=UpperCAmelCase__ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
__snake_case : List[str] = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
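    # Pattern note (illustrative): the tests above all follow the same data-parallel
    # Flax recipe: `replicate(params)` copies the weights to every local device,
    # `shard(prompt_ids)` reshapes the batch to (jax.device_count(), batch_per_device, ...),
    # and calling the pipeline with jit=True runs the sampling loop under `jax.pmap`.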
| 326 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
_a = datasets.utils.logging.get_logger(__name__)
_a = ['names', 'prefix']
_a = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols']
_a = ['encoding_errors', 'on_bad_lines']
_a = ['date_format']
@dataclass
class CsvConfig(datasets.BuilderConfig):
"""simple docstring"""
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    """simple docstring"""
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
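    # Usage sketch (illustrative; assumes the restored names above): the config's
    # `pd_read_csv_kwargs` property mirrors `pandas.read_csv` keywords, so
    #   CsvConfig(sep=";").pd_read_csv_kwargs
    # yields a dict such as {"sep": ";", "header": "infer", ...} in which deprecated
    # parameters are pruned whenever they still hold their `CsvConfig()` defaults.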
| 17 | 0 |
from __future__ import annotations
import math
_snake_case = "2020.9.26"
_snake_case = "xcodz-dot, cclaus, dhruvmanila"
def convert_to_2d(x, y, z, scale, distance):
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y


def rotate(x, y, z, axis, angle):
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
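# Geometry note (illustrative): the "z" branch applies the standard 2D rotation
# matrix in the x-y plane, new_x = x*cos(a) - y*sin(a) and new_y = y*cos(a) + x*sin(a),
# after the module's own (non-standard) angle normalisation above; the "x" and "y"
# branches apply the same matrix in the y-z and x-z planes respectively.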
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(f"{convert_to_2d(1.0, 2.0, 3.0, 10.0, 10.0) = }")
    print(f"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 26 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_a = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_a = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
_a = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                } ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"], )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 17 | 0 |
"""simple docstring"""
import os
import sys
import unittest
a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a = os.path.join(git_repo_path, '''src''', '''transformers''')
a = '''\n{0} = None\n'''
a = '''\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'''
a = '''\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'''
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : Optional[int] ):
_A = find_backend(' _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")' )
self.assertIsNone(UpperCAmelCase__ )
_A = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(UpperCAmelCase__ , 'tokenizers' )
_A = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(UpperCAmelCase__ , 'tensorflow_text' )
_A = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(UpperCAmelCase__ , 'sentencepiece_and_tokenizers' )
_A = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(UpperCAmelCase__ , 'sentencepiece_and_tensorflow_text' )
_A = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(UpperCAmelCase__ , 'sentencepiece_and_tokenizers_and_vision' )
def lowerCAmelCase_ ( self : Tuple ):
_A = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , UpperCAmelCase__ )
self.assertIn('tensorflow_text' , UpperCAmelCase__ )
self.assertIn('sentencepiece_and_tokenizers' , UpperCAmelCase__ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(UpperCAmelCase__ , '\nCONSTANT = None\n' )
_A = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
UpperCAmelCase__ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
_A = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
_A = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase_ ( self : List[Any] ):
_A = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n'
_A = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , UpperCAmelCase__ )
| 315 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    '''simple docstring'''
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    '''simple docstring'''
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
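# Worked example (illustrative): for poly = (0.0, 0.0, 5.0, 9.3, 7.0) and x = 10.0
# both functions compute 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 500 + 9300 + 70000 = 79800.0;
# Horner's scheme reaches the same value with only multiply-add steps from the
# highest coefficient down.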
| 17 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : List[str] = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head'):
            key = 'segformer.encoder.' + key
        if key.startswith('backbone'):
            key = key.replace('backbone', 'segformer.encoder')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('segformer.encoder.layer_norm') + len('segformer.encoder.layer_norm')]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith('head'):
            key = key.replace('head', 'classifier')
        new_state_dict[key] = value
    return new_state_dict
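# Worked example (illustrative): rename_keys maps the original checkpoint key
# "backbone.block1.0.attn.q.weight" through the substitutions above to
# "segformer.encoder.block.0.0.attention.self.query.weight".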
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = 'huggingface/label-files'
    if "segformer" in model_name:
        size = model_name[len('segformer.') : len('segformer.') + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = 'ade20k-id2label.json'
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = 'cityscapes-id2label.json'
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = 'imagenet-1k-id2label.json'
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")
    # set config attributes
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False)
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))['state_dict']
    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # set expected_slice based on model name
    # ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.13_72E01, -1.27_87E01, -1.34_77E01],
[-1.25_36E01, -1.41_94E01, -1.44_09E01],
[-1.32_17E01, -1.48_88E01, -1.53_27E01],
],
[
[-1.47_91E01, -1.71_22E01, -1.82_77E01],
[-1.71_63E01, -1.91_92E01, -1.95_33E01],
[-1.78_97E01, -1.99_91E01, -2.03_15E01],
],
[
[7.67_23E-01, 4.19_21E-01, -7.78_78E-02],
[4.77_72E-01, 9.55_57E-03, -2.80_82E-01],
[3.60_32E-01, -2.48_26E-01, -5.11_68E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)
# finally, save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowercase__ : Optional[Any] = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
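# Example invocation (a sketch; the script filename and checkpoint path are assumptions):
#
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-converted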
| 338 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because pytorch-lightning requires a forward method
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--longformer_model',
default=None,
type=str,
required=True,
help='model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.',
)
parser.add_argument(
'--longformer_question_answering_ckpt_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch Lightning Checkpoint.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
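# Example invocation (a sketch; the checkpoint path is hypothetical):
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./checkpoints/epoch=4.ckpt \
#       --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa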
| 17 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None  # attribute name assumed; the original assignment was obfuscated
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
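# Standalone sketch of the generation path exercised above (assumes network access to
# download facebook/xglm-564M; the prompt is arbitrary):
#
#   model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tokenizer("Hello, my dog is a little", return_tensors="tf").input_ids
#   print(tokenizer.decode(model.generate(ids, max_new_tokens=12)[0], skip_special_tokens=True))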
| 53 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 17 | 0 |
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
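# The same report is normally produced through the CLI entry point (sketch; the config
# file location is an assumption and varies by setup):
#
#   accelerate env
#   accelerate env --config_file /path/to/accelerate_config.yaml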
| 59 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
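# Minimal usage sketch (the audio path is hypothetical; requires torch and a downloadable
# openai/whisper-base checkpoint):
#
#   tool = SpeechToTextTool()
#   transcript = tool("sample.flac")
#   print(transcript)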
| 17 | 0 |
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
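if __name__ == "__main__":
    # Illustrative checks (not from the original file): the best contiguous product of
    # [2, 3, -2, 4] is 2 * 3 = 6, and two negatives can flip the sign back: [-2, -3, 4] -> 24.
    print(max_product_subarray([2, 3, -2, 4]))  # 6
    print(max_product_subarray([-2, -3, 4]))  # 24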
| 13 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False  # bug fix: without this guard, NumPy inputs raised NameError below
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly parallel: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
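# Sanity sketch for slerp (not from the original file): halfway between orthogonal unit
# vectors stays on the unit circle, while nearly parallel inputs (|dot| > DOT_THRESHOLD)
# take the linear-interpolation path.
#
#   v = slerp(0.5, np.array([1.0, 0.0]), np.array([0.0, 1.0]))
#   assert np.allclose(np.linalg.norm(v), 1.0)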
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer, unet=unet,
            scheduler=scheduler, feature_extractor=feature_extractor, coca_model=coca_model,
            coca_tokenizer=coca_tokenizer, coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)

        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt, padding="max_length", max_length=self.tokenizer.model_max_length,
            truncation=True, return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt, padding="max_length", max_length=self.tokenizer.model_max_length,
            truncation=True, return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents, t, i, text_embeddings_for_guidance, noise_pred,
                        clip_image_embeddings, clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
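# Hedged end-to-end sketch (component loading is elided; every variable below is assumed
# to be a preloaded model/processor, and the prompts are arbitrary):
#
#   pipe = CLIPGuidedImagesMixingStableDiffusion(
#       vae=vae, text_encoder=text_encoder, clip_model=clip_model, tokenizer=tokenizer,
#       unet=unet, scheduler=scheduler, feature_extractor=feature_extractor,
#   )
#   result = pipe(style_image=style_img, content_image=content_img,
#                 style_prompt="a watercolor painting", content_prompt="a photo of two cats")
#   result.images[0].save("mixed.png")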
| 17 | 0 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class HfArgumentParserTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
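# Quick standalone illustration of the helpers exercised above (safe to run, no downloads):
#
#   from transformers.utils import logging as hf_logging
#   hf_logging.set_verbosity_error()  # silences warnings from all `transformers.*` loggers
#   assert hf_logging.get_verbosity() == hf_logging.ERROR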
| 71 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None  # attribute name assumed; the original assignment was obfuscated
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 17 | 0 |
def perfect(number: int) -> bool:
    """Check whether `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
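# Quick check (example added for illustration, not from the original script):
# perfect(6) and perfect(28) are True -- 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14 --
# while perfect(7) is False.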
| 254 |
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
| 17 | 0 |
from math import factorial

DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))


def solution() -> int:
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)


if __name__ == "__main__":
    print(f"{solution() = }")
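# Note (added): this is Project Euler problem 34. Only 145 and 40585 equal the sum
# of the factorials of their digits, so solution() evaluates to 145 + 40585 = 40730.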
| 265 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
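# Usage sketch (illustrative, not part of the original module): `context_length`
# falls back to `prediction_length` when left unset.
# config = TimeSeriesTransformerConfig(prediction_length=24)
# assert config.context_length == 24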
| 17 | 0 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 221 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class Image:
    @staticmethod
    def open(*args, **kwargs):
        pass


def load_image(_):
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
_a = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
@require_detectron2
@require_pytesseract
def _lowercase ( self : Dict ):
__lowercase = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2" )
__lowercase = INVOICE_URL
__lowercase = "How many cats are there?"
__lowercase = [
{"score": 0.0_001, "answer": "oy 2312/2019", "start": 3_8, "end": 3_9},
{"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 3_8, "end": 4_0},
]
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), UpperCAmelCase__ )
# This image contains no detectable text, so LayoutLMv2 should return an empty answer.
__lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(UpperCAmelCase__, [] )
# We can optionally pass the words and bounding boxes directly
__lowercase = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__lowercase = []
__lowercase = []
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, words=UpperCAmelCase__, boxes=UpperCAmelCase__, top_k=2 )
self.assertEqual(UpperCAmelCase__, [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def _lowercase ( self : List[str] ):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_944, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_009, "answer": "us-001", "start": 1_6, "end": 1_6},
],
]
* 2, )
@slow
@require_torch
@require_detectron2
@require_pytesseract
def _lowercase ( self : Dict ):
__lowercase = pipeline(
"document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_974, "answer": "1110212019", "start": 2_3, "end": 2_3},
{"score": 0.9_948, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Optional[Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline({"image": image, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.4_251, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.0_819, "answer": "1110212019", "start": 2_3, "end": 2_3},
], )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self : Union[str, Any] ):
__lowercase = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=UpperCAmelCase__ )
__lowercase = pipeline(
"document-question-answering", model="impira/layoutlm-document-qa", tokenizer=UpperCAmelCase__, revision="3dc6de3", max_seq_len=5_0, )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
__lowercase = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
[
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
]
]
* 2, )
__lowercase = list(zip(*apply_tesseract(load_image(UpperCAmelCase__ ), UpperCAmelCase__, "" ) ) )
# This model should also work if `image` is set to None
__lowercase = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase__, decimals=4 ), [
{"score": 0.9_999, "answer": "us-001", "start": 1_6, "end": 1_6},
{"score": 0.9_998, "answer": "us-001", "start": 1_6, "end": 1_6},
], )
@slow
@require_torch
def _lowercase ( self : Dict ):
__lowercase = pipeline(
"document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ), feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", )
__lowercase = INVOICE_URL
__lowercase = "What is the invoice number?"
__lowercase = dqa_pipeline(image=UpperCAmelCase__, question=UpperCAmelCase__, top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase__, decimals=4 ), [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _lowercase ( self : List[Any] ):
pass
| 17 | 0 |
def kinetic_energy(mass: float, velocity: float) -> float:
    """Kinetic energy: 0.5 * m * v**2; the sign of the velocity does not matter."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
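# Example (added for illustration): kinetic_energy(10, 10) == 0.5 * 10 * 10**2 == 500.0,
# and the sign of the velocity is irrelevant because of the abs() calls.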
| 326 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with one '<symbol> <count>' pair per line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add the word-ending symbol where the
    # word is not broken up any more
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
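# Example (added for illustration): {"hell@@": 5, "o": 2, "</s>": 1} becomes
# {"hell": 5, "o</w>": 2, "</s>": 1} -- BPE continuation markers ("@@") are dropped,
# word-final tokens gain "</w>", and special tokens keep their original spelling.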
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 17 | 0 |
import math
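# Context (added): this is Project Euler problem 234. A number n is "semidivisible"
# when exactly one of lps(n), the largest prime <= sqrt(n), and ups(n), the smallest
# prime >= sqrt(n), divides n. The code below sieves the primes around sqrt(limit)
# and sums every semidivisible number not exceeding the limit.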
def prime_sieve(n: int) -> list:
    """Odd-only sieve of Eratosthenes returning all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
| 18 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
_import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_chinese_clip"] = [
    "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
    "ChineseCLIPModel",
    "ChineseCLIPPreTrainedModel",
    "ChineseCLIPTextModel",
    "ChineseCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
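# Usage sketch (illustrative, not part of the original module): with the lazy module
# installed in sys.modules, `from transformers.models.chinese_clip import ChineseCLIPModel`
# only triggers the heavy torch-dependent import on first attribute access.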
| 18 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps", "test_metrics.py"] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
SCREAMING_SNAKE_CASE_ : Any = test_metrics
@require_cpu
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
debug_launcher(self.test_metrics.main,num_processes=1 )
@require_cpu
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
self.test_metrics.main()
@require_multi_gpu
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
print(F'Found {torch.cuda.device_count()} devices.' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_A,env=os.environ.copy() )
| 18 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 18 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
_import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_clip"] = [
    "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
    "CLIPModel",
    "CLIPPreTrainedModel",
    "CLIPTextModel",
    "CLIPTextModelWithProjection",
    "CLIPVisionModel",
    "CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_clip"] = [
    "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
    "TFCLIPModel",
    "TFCLIPPreTrainedModel",
    "TFCLIPTextModel",
    "TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_clip"] = [
    "FlaxCLIPModel",
    "FlaxCLIPPreTrainedModel",
    "FlaxCLIPTextModel",
    "FlaxCLIPTextPreTrainedModel",
    "FlaxCLIPVisionModel",
    "FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 18 |
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin() with its Maclaurin series."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative, and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
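# Examples (added for illustration): sin(90) == 1.0 and sin(30) == 0.5 at the default
# 10 rounded decimal places; the input angle is first reduced modulo 360 degrees.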
| 18 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != "cuda","This test requires a GPU" )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ : Optional[int] = PNDMScheduler(skip_prk_steps=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = self.dummy_vae
SCREAMING_SNAKE_CASE_ : str = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : str = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
SCREAMING_SNAKE_CASE_ : Tuple = 77
SCREAMING_SNAKE_CASE_ : Tuple = self.dummy_image.to(_A )
# put models in fp16
SCREAMING_SNAKE_CASE_ : Dict = unet.half()
SCREAMING_SNAKE_CASE_ : str = vae.half()
SCREAMING_SNAKE_CASE_ : int = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ : int = AltDiffusionImgaImgPipeline(
unet=_A,scheduler=_A,vae=_A,text_encoder=_A,tokenizer=_A,safety_checker=_A,feature_extractor=self.dummy_extractor,)
SCREAMING_SNAKE_CASE_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor,do_normalize=_A )
SCREAMING_SNAKE_CASE_ : Any = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : int = alt_pipe(
[prompt],generator=_A,num_inference_steps=2,output_type="np",image=_A,).images
assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
SCREAMING_SNAKE_CASE_ : Optional[int] = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE_ : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
SCREAMING_SNAKE_CASE_ : List[Any] = "BAAI/AltDiffusion"
SCREAMING_SNAKE_CASE_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
_A,safety_checker=_A,)
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ : Tuple = "A fantasy landscape, trending on artstation"
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = pipe(
prompt=_A,image=_A,strength=0.75,guidance_scale=7.5,generator=_A,output_type="np",)
SCREAMING_SNAKE_CASE_ : List[Any] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 18 | from functools import lru_cache
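# @lru_cache memoizes each computed factorial, so repeated or recursive calls reuse earlier results instead of recomputing them.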
@lru_cache
def factorial ( num : int ):
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
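# Recursive decimal-to-binary conversion: divmod peels off the least-significant bit and the recursion prepends the remaining bits.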
def binary_recursive ( decimal : int ):
    """simple docstring"""
    decimal = int(decimal )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def main ( number : str ):
    """simple docstring"""
    number = str(number ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return f'{negative}0b{binary_recursive(int(number ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
| 18 | from collections import defaultdict
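# "Even Tree" problem: count the edges that can be cut so every remaining component has an even number of vertices; dfs returns subtree sizes, and even-sized subtrees mark cuttable edges.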
def dfs ( start : int ):
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree ( ):
    """simple docstring"""
    dfs(1 )
if __name__ == "__main__":
    n , m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 18 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
A = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
| 18 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
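# Builds the top-level "accelerate" argument parser; each *_command_parser registers one subcommand and attaches its handler as the parsed namespace's "func" attribute.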
def main ( ):
    """simple docstring"""
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
| 18 | 1 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class a__ :
pass
| 18 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__lowerCamelCase : Any = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
__lowerCamelCase : Union[str, Any] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = PRETRAINED_INIT_CONFIGURATION
A = RoFormerTokenizer
def __init__( self : List[str],_A : int=None,_A : int=None,_A : int=True,_A : List[Any]="[UNK]",_A : Tuple="[SEP]",_A : List[Any]="[PAD]",_A : Optional[int]="[CLS]",_A : Optional[Any]="[MASK]",_A : Optional[int]=True,_A : List[str]=None,**_A : List[Any],):
"""simple docstring"""
super().__init__(
_A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase",_A ) != do_lower_case
or pre_tok_state.get("strip_accents",_A ) != strip_accents
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
SCREAMING_SNAKE_CASE_ : List[str] = strip_accents
SCREAMING_SNAKE_CASE_ : str = pre_tok_class(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = do_lower_case
def __getstate__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertPreTokenizer()
return state
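    # The custom Jieba pre-tokenizer cannot be pickled, so __getstate__ swaps in a plain BertPreTokenizer and __setstate__ rebuilds the Jieba one from the restored vocab.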
def __setstate__( self : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = d
SCREAMING_SNAKE_CASE_ : List[str] = self.__dict__["_tokenizer"].get_vocab()
SCREAMING_SNAKE_CASE_ : Any = PreTokenizer.custom(JiebaPreTokenizer(_A ) )
def __UpperCamelCase ( self : Union[str, Any],_A : List[Any],_A : str=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
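    # Saving likewise requires a serializable pre-tokenizer, so the Jieba pre-tokenizer is temporarily replaced before delegating to the parent implementation.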
def __UpperCamelCase ( self : int,_A : Optional[int],_A : List[Any]=None,_A : Tuple=None,_A : str=False,**_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertPreTokenizer()
return super().save_pretrained(_A,_A,_A,_A,**_A )
| 18 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
    def _check_no_duplicates_on_constructed_node ( self,node ):
        """simple docstring"""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key,list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
    def construct_mapping ( self,node,deep=False ):
        """simple docstring"""
        mapping = super().construct_mapping(node,deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
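# Split a README into its YAML front matter (between leading "---" markers) and the remaining body; returns (None, body) when no front matter is present.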
def _split_yaml_from_readme ( readme_content : str ):
    """simple docstring"""
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---" ) + 1
        yamlblock = "\n".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata ( dict ):
    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme ( cls,path : Path ):
        """simple docstring"""
        with open(path,encoding="utf-8" ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme ( self,path : Path ):
        """simple docstring"""
        if path.exists():
            with open(path,encoding="utf-8" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path,"w",encoding="utf-8" ) as readme_file:
            readme_file.write(full_content )
    def _to_readme ( self,readme_content : Optional[str] = None ):
        """simple docstring"""
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string ( cls,string : str ):
        """simple docstring"""
        metadata_dict = yaml.load(string,Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-","_" ) if key.replace("-","_" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string ( self ):
        """simple docstring"""
        return yaml.safe_dump(
            {
                (key.replace("_","-" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },sort_keys=False,allow_unicode=True,encoding="utf-8",).decode("utf-8" )
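# Catalog of task-category ids recognized in dataset READMEs; the per-category sub-task lists are left empty here.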
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
    ap.add_argument('''readme_filepath''')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 18 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
A = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
| 18 | 1 |
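# Euclidean distance two ways: a vectorized NumPy implementation and a pure-Python fallback, benchmarked against each other under __main__.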
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance ( vector_1 : Vector , vector_2 : Vector ):
    """simple docstring"""
    return np.sqrt(np.sum((np.asarray(vector_1 ) - np.asarray(vector_2 )) ** 2 ) )
def euclidean_distance_no_np ( vector_1 : Vector , vector_2 : Vector ):
    """simple docstring"""
    return sum((va - vb) ** 2 for va, vb in zip(vector_1 , vector_2 ) ) ** (1 / 2)
if __name__ == "__main__":
    def benchmark ( ):
"""simple docstring"""
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_0_0_0_0 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_0_0_0_0 , globals=globals() , ) )
benchmark()
| 18 | import pytest
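# I/O tests for TextDatasetReader: caching behavior, feature typing, split selection, and DatasetDict construction from plain-text files.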
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
    # Text files expose a single "text" column whose default dtype is "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 18 | 1 |
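# Large-prime generation: cheap trial division against a small-prime table, then the probabilistic Miller-Rabin test for the survivors.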
import random
def rabin_miller ( num : int ):
    """simple docstring"""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def is_prime_low_num ( num : int ):
    """simple docstring"""
    if num < 2:
        return False
    low_primes = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
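    # Membership and trial division against the small-prime table settle most candidates before the probabilistic test runs.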
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
    return rabin_miller(num )
def generate_large_prime ( keysize : int = 1_0_2_4 ):
    """simple docstring"""
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    num = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 18 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
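# Helper for the scheduler sweep below: confirms every scheduler produced an image tensor of the same shape.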
def check_same_shape ( tensor_list ):
    """simple docstring"""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 18 | 1 |
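# Array-backed segment tree supporting point updates and range queries for any associative binary function (sum, max, min, ...).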
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode :
    def __init__( self,start,end,val,left=None,right=None ):
        """simple docstring"""
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
def __repr__( self : Tuple ):
"""simple docstring"""
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class SegmentTree :
    def __init__( self,collection : Sequence,function ):
        """simple docstring"""
        self.collection = collection
        self.fn = function
        self.root = None  # stays None for an empty collection
        if self.collection:
            self.root = self._build_tree(0,len(collection ) - 1 )
    def update ( self,i,val ):
        """simple docstring"""
        self._update_tree(self.root,i,val )
    def query_range ( self,i,j ):
        """simple docstring"""
        return self._query_range(self.root,i,j )
    def _build_tree ( self,start,end ):
        """simple docstring"""
        if start == end:
            return SegmentTreeNode(start,end,self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start,mid )
        right = self._build_tree(mid + 1,end )
        return SegmentTreeNode(start,end,self.fn(left.val,right.val ),left,right )
    def _update_tree ( self,node,i,val ):
        """simple docstring"""
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left,i,val )
        else:
            self._update_tree(node.right,i,val )
        node.val = self.fn(node.left.val,node.right.val )
def __UpperCamelCase ( self : str,_A : List[str],_A : Optional[int],_A : Optional[Any] ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left,_A,_A )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left,_A,node.mid ),self._query_range(node.right,node.mid + 1,_A ),)
else:
# range in right child tree
return self._query_range(node.right,_A,_A )
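# Illustrative note: _query_range splits a query into at most O(log n) node
# visits. Assuming fn is operator.add over [2, 1, 5, 3, 4], querying the
# range [1, 3] combines 1 + 5 + 3 = 9 from the covering subtree nodes.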
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ : int = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowerCamelCase : int = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class a__ ( A__ ):
A = 'perceiver'
def __init__( self : List[Any],_A : Tuple=256,_A : str=1280,_A : List[Any]=768,_A : Union[str, Any]=1,_A : Union[str, Any]=26,_A : List[str]=8,_A : List[Any]=8,_A : List[Any]=None,_A : List[Any]=None,_A : Union[str, Any]="kv",_A : Any=1,_A : int=1,_A : Dict="gelu",_A : Any=0.1,_A : int=0.02,_A : int=1E-12,_A : Any=True,_A : Optional[Any]=262,_A : List[Any]=2048,_A : str=56,_A : Optional[int]=[368, 496],_A : Dict=16,_A : Tuple=1920,_A : List[Any]=16,_A : str=[1, 16, 224, 224],**_A : Optional[Any],):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Dict = num_latents
SCREAMING_SNAKE_CASE_ : List[Any] = d_latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = num_blocks
SCREAMING_SNAKE_CASE_ : List[Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE_ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = num_cross_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = qk_channels
SCREAMING_SNAKE_CASE_ : Any = v_channels
SCREAMING_SNAKE_CASE_ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE_ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE_ : Any = cross_attention_widening_factor
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE_ : Dict = image_size
# flow attributes
SCREAMING_SNAKE_CASE_ : List[Any] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE_ : str = num_frames
SCREAMING_SNAKE_CASE_ : Any = audio_samples_per_frame
SCREAMING_SNAKE_CASE_ : Tuple = samples_per_patch
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_shape
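# Rough usage sketch (assuming the upstream name PerceiverConfig for the
# class obfuscated as a__ above):
#   config = PerceiverConfig(num_latents=256, d_latents=1280)
# which matches the deepmind/language-perceiver defaults in the signature.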
class a__ ( A__ ):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def _snake_case ( lowerCAmelCase : Features ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.inf
def set_batch_size(lowerCAmelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = min(lowerCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(lowerCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(lowerCAmelCase , lowerCAmelCase ) and feature.dtype == "binary":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(lowerCAmelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(lowerCAmelCase , lowerCAmelCase )
return None if batch_size is np.inf else batch_size
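# Rationale (illustrative): large binary features such as images and audio
# make individual rows heavy, so the parquet row-group size is capped per
# feature type; e.g. a dataset with an Image column gets
# config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS rows per group, while
# plain scalar datasets fall back to the writer's default batch size.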
class a__ ( A__ ):
def __init__( self : List[str],_A : NestedDataStructureLike[PathLike],_A : Optional[NamedSplit] = None,_A : Optional[Features] = None,_A : str = None,_A : bool = False,_A : bool = False,_A : Optional[int] = None,**_A : int,):
"""simple docstring"""
super().__init__(
_A,split=_A,features=_A,cache_dir=_A,keep_in_memory=_A,streaming=_A,num_proc=_A,**_A,)
SCREAMING_SNAKE_CASE_ : int = path_or_paths if isinstance(_A,_A ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE_ : Tuple = _PACKAGED_DATASETS_MODULES["parquet"][1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = Parquet(
cache_dir=_A,data_files=_A,features=_A,hash=_A,**_A,)
def __UpperCamelCase ( self : str ):
"""simple docstring"""
if self.streaming:
SCREAMING_SNAKE_CASE_ : str = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : int = None
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
self.builder.download_and_prepare(
download_config=_A,download_mode=_A,verification_mode=_A,base_path=_A,num_proc=self.num_proc,)
SCREAMING_SNAKE_CASE_ : List[Any] = self.builder.as_dataset(
split=self.split,verification_mode=_A,in_memory=self.keep_in_memory )
return dataset
class a__ :
def __init__( self : int,_A : Dataset,_A : Union[PathLike, BinaryIO],_A : Optional[int] = None,**_A : str,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = dataset
SCREAMING_SNAKE_CASE_ : Union[str, Any] = path_or_buf
SCREAMING_SNAKE_CASE_ : int = batch_size or get_writer_batch_size(dataset.features )
SCREAMING_SNAKE_CASE_ : List[Any] = parquet_writer_kwargs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf,(str, bytes, os.PathLike) ):
with open(self.path_or_buf,"wb+" ) as buffer:
SCREAMING_SNAKE_CASE_ : Any = self._write(file_obj=_A,batch_size=_A,**self.parquet_writer_kwargs )
else:
SCREAMING_SNAKE_CASE_ : Dict = self._write(file_obj=self.path_or_buf,batch_size=_A,**self.parquet_writer_kwargs )
return written
def __UpperCamelCase ( self : str,_A : BinaryIO,_A : int,**_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : Any = parquet_writer_kwargs.pop("path_or_buf",_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.dataset.features.arrow_schema
SCREAMING_SNAKE_CASE_ : List[str] = pq.ParquetWriter(_A,schema=_A,**_A )
for offset in logging.tqdm(
range(0,len(self.dataset ),_A ),unit="ba",disable=not logging.is_progress_bar_enabled(),desc="Creating parquet from Arrow format",):
SCREAMING_SNAKE_CASE_ : Any = query_table(
table=self.dataset._data,key=slice(_A,offset + batch_size ),indices=self.dataset._indices if self.dataset._indices is not None else None,)
writer.write_table(_A )
written += batch.nbytes
writer.close()
return written
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a__ ( yaml.SafeLoader ):
def __UpperCamelCase ( self : str,_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = [self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(_A ) if isinstance(_A,_A ) else key for key in keys]
SCREAMING_SNAKE_CASE_ : Optional[int] = Counter(_A )
SCREAMING_SNAKE_CASE_ : Tuple = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
def __UpperCamelCase ( self : Tuple,_A : Dict,_A : List[Any]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = super().construct_mapping(_A,deep=_A )
self._check_no_duplicates_on_constructed_node(_A )
return mapping
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_ : List[Any] = full_content[1:].index("---" ) + 1
SCREAMING_SNAKE_CASE_ : int = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCAmelCase )
class a__ ( A__ ):
# class attributes
A = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def __UpperCamelCase ( cls : Any,_A : Path ):
"""simple docstring"""
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_A )
else:
return cls()
def __UpperCamelCase ( self : Dict,_A : Path ):
"""simple docstring"""
if path.exists():
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ : int = readme_file.read()
else:
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : int = self._to_readme(_A )
with open(_A,"w",encoding="utf-8" ) as readme_file:
readme_file.write(_A )
def __UpperCamelCase ( self : Optional[int],_A : Optional[str] = None ):
"""simple docstring"""
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = _split_yaml_from_readme(_A )
SCREAMING_SNAKE_CASE_ : Tuple = "---\n" + self.to_yaml_string() + "---\n" + content
else:
SCREAMING_SNAKE_CASE_ : Dict = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def __UpperCamelCase ( cls : Dict,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = yaml.load(_A,Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_ : Any = {
(key.replace("-","_" ) if key.replace("-","_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace("_","-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
},sort_keys=_A,allow_unicode=_A,encoding="utf-8",).decode("utf-8" )
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__lowerCamelCase : List[Any] = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__lowerCamelCase : Dict = ap.parse_args()
__lowerCamelCase : List[Any] = Path(args.readme_filepath)
__lowerCamelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
from __future__ import annotations
__lowerCamelCase : List[Any] = list[tuple[int, int]]
__lowerCamelCase : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Union[str, Any] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class a__ :
def __init__( self : List[Any],_A : int,_A : int,_A : int,_A : int,_A : float,_A : Node | None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = pos_x
SCREAMING_SNAKE_CASE_ : Any = pos_y
SCREAMING_SNAKE_CASE_ : str = (pos_y, pos_x)
SCREAMING_SNAKE_CASE_ : Tuple = goal_x
SCREAMING_SNAKE_CASE_ : Any = goal_y
SCREAMING_SNAKE_CASE_ : str = g_cost
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : Dict = self.calculate_heuristic()
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = abs(self.pos_x - self.goal_x )
SCREAMING_SNAKE_CASE_ : Optional[Any] = abs(self.pos_y - self.goal_y )
return dx + dy
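# Worked example (illustrative): for a node at (pos_y=2, pos_x=1) with goal
# (6, 6), the Manhattan heuristic is |1 - 6| + |2 - 6| = 9; greedy
# best-first always expands whichever open node minimizes this estimate.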
def __lt__( self : Union[str, Any],_A : Tuple ):
"""simple docstring"""
return self.f_cost < other.f_cost
class a__ :
def __init__( self : Optional[int],_A : tuple[int, int],_A : tuple[int, int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = Node(start[1],start[0],goal[1],goal[0],0,_A )
SCREAMING_SNAKE_CASE_ : List[str] = Node(goal[1],goal[0],goal[1],goal[0],9_9999,_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.start]
SCREAMING_SNAKE_CASE_ : list[Node] = []
SCREAMING_SNAKE_CASE_ : Dict = False
def __UpperCamelCase ( self : str ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
SCREAMING_SNAKE_CASE_ : List[str] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
SCREAMING_SNAKE_CASE_ : Optional[int] = True
return self.retrace_path(_A )
self.closed_nodes.append(_A )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_successors(_A )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_A )
else:
# retrieve the best current path
SCREAMING_SNAKE_CASE_ : Tuple = self.open_nodes.pop(self.open_nodes.index(_A ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_A )
else:
self.open_nodes.append(_A )
if not self.reached:
return [self.start.pos]
return None
def __UpperCamelCase ( self : Union[str, Any],_A : Node ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = []
for action in delta:
SCREAMING_SNAKE_CASE_ : Dict = parent.pos_x + action[1]
SCREAMING_SNAKE_CASE_ : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_A,_A,self.target.pos_y,self.target.pos_x,parent.g_cost + 1,_A,) )
return successors
def __UpperCamelCase ( self : List[Any],_A : Node | None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = node
SCREAMING_SNAKE_CASE_ : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
__lowerCamelCase : Tuple = (0, 0)
__lowerCamelCase : List[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print('''------''')
__lowerCamelCase : Tuple = GreedyBestFirst(init, goal)
__lowerCamelCase : Optional[int] = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__lowerCamelCase : str = 2
for elem in grid:
print(elem)
from __future__ import annotations
from math import pi, sqrt
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
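# Worked example (illustrative): f = 1 / (2 * pi * sqrt(L * C)); with
# L = 10 mH and C = 5 uF, sqrt(0.01 * 5e-6) ~= 2.236e-4, so
# f ~= 1 / (2 * pi * 2.236e-4) ~= 711.8 Hz.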
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy
# List of input, output pairs
__lowerCamelCase : List[Any] = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
__lowerCamelCase : Dict = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
__lowerCamelCase : List[str] = [2, 4, 1, 5]
__lowerCamelCase : int = len(train_data)
__lowerCamelCase : str = 0.009
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict="train" ):
"""simple docstring"""
return calculate_hypothesis_value(lowerCAmelCase , lowerCAmelCase ) - output(
lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 0
for i in range(len(lowerCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Tuple ):
"""simple docstring"""
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str=m ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = 0
for i in range(lowerCAmelCase ):
if index == -1:
summation_value += _error(lowerCAmelCase )
else:
summation_value += _error(lowerCAmelCase ) * train_data[i][0][index]
return summation_value
def _snake_case ( lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = summation_of_cost_derivative(lowerCAmelCase , lowerCAmelCase ) / m
return cost_derivative_value
def _snake_case ( ):
"""simple docstring"""
global parameter_vector
# Tune these values to set a tolerance value for predicted output
SCREAMING_SNAKE_CASE_ : List[str] = 0.000002
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : str = 0
while True:
j += 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = [0, 0, 0, 0]
for i in range(0 , len(lowerCAmelCase ) ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_cost_derivative(i - 1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
lowerCAmelCase , lowerCAmelCase , atol=lowerCAmelCase , rtol=lowerCAmelCase , ):
break
SCREAMING_SNAKE_CASE_ : Union[str, Any] = temp_parameter_vector
print(("Number of iterations:", j) )
def _snake_case ( ):
"""simple docstring"""
for i in range(len(lowerCAmelCase ) ):
print(("Actual output value:", output(lowerCAmelCase , "test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(lowerCAmelCase , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
def _snake_case ( lowerCAmelCase : list ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = len(lowerCAmelCase )
for i in range(1 , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = collection[i]
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Tuple = i - 1
while low <= high:
SCREAMING_SNAKE_CASE_ : int = (low + high) // 2
if val < collection[mid]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = mid - 1
else:
SCREAMING_SNAKE_CASE_ : Tuple = mid + 1
for j in range(lowerCAmelCase , lowerCAmelCase , -1 ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = collection[j - 1]
SCREAMING_SNAKE_CASE_ : int = val
return collection
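# Note (illustrative): the binary search above finds each insertion point in
# O(log i) comparisons, but shifting elements to make room is still O(i),
# so the worst case remains O(n^2) moves with O(n log n) comparisons.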
if __name__ == "__main__":
__lowerCamelCase : Dict = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : List[str] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int,_A : List[Any] ):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"],model_result["ss"] ):
SCREAMING_SNAKE_CASE_ : Any = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],eager_mode=_A,multi_process=_A,)
SCREAMING_SNAKE_CASE_ : List[str] = TensorFlowBenchmark(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
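# Note (illustrative): the remaining tests repeat this pattern with tiny
# checkpoints, sequence_lengths=[8] and batch_sizes=[1] to keep runtimes
# small; the multi_process flag (obfuscated as _A) presumably keeps the
# benchmark in-process so the result dicts can be asserted on directly.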
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "sgugger/tiny-distilbert-classification"
SCREAMING_SNAKE_CASE_ : Optional[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,only_pretrain_model=_A,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TensorFlowBenchmark(_A )
SCREAMING_SNAKE_CASE_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,)
SCREAMING_SNAKE_CASE_ : Any = TensorFlowBenchmark(_A )
SCREAMING_SNAKE_CASE_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoConfig.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],eager_mode=_A,multi_process=_A,)
SCREAMING_SNAKE_CASE_ : Tuple = TensorFlowBenchmark(_A,[config] )
SCREAMING_SNAKE_CASE_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE_ : List[Any] = AutoConfig.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,)
SCREAMING_SNAKE_CASE_ : Any = TensorFlowBenchmark(_A,[config] )
SCREAMING_SNAKE_CASE_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,)
SCREAMING_SNAKE_CASE_ : Dict = TensorFlowBenchmark(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE_ : Any = AutoConfig.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TensorFlowBenchmark(_A,[config] )
SCREAMING_SNAKE_CASE_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = "patrickvonplaten/t5-tiny-random"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoConfig.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],multi_process=_A,)
SCREAMING_SNAKE_CASE_ : List[str] = TensorFlowBenchmark(_A,configs=[config] )
SCREAMING_SNAKE_CASE_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0,"Cannot do xla on CPU." )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = "sshleifer/tiny-gpt2"
SCREAMING_SNAKE_CASE_ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],training=_A,inference=_A,sequence_lengths=[8],batch_sizes=[1],use_xla=_A,multi_process=_A,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TensorFlowBenchmark(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID],inference=_A,save_to_csv=_A,sequence_lengths=[8],batch_sizes=[1],inference_time_csv_file=os.path.join(_A,"inf_time.csv" ),inference_memory_csv_file=os.path.join(_A,"inf_mem.csv" ),env_info_csv_file=os.path.join(_A,"env.csv" ),multi_process=_A,)
SCREAMING_SNAKE_CASE_ : Dict = TensorFlowBenchmark(_A )
benchmark.run()
self.assertTrue(Path(os.path.join(_A,"inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_A,"inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(_A,"env.csv" ) ).exists() )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(_A : Optional[Any] ):
self.assertTrue(hasattr(_A,"sequential" ) )
self.assertTrue(hasattr(_A,"cumulative" ) )
self.assertTrue(hasattr(_A,"current" ) )
self.assertTrue(hasattr(_A,"total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID],inference=_A,sequence_lengths=[8],batch_sizes=[1],log_filename=os.path.join(_A,"log.txt" ),log_print=_A,trace_memory_line_by_line=_A,eager_mode=_A,multi_process=_A,)
SCREAMING_SNAKE_CASE_ : Tuple = TensorFlowBenchmark(_A )
SCREAMING_SNAKE_CASE_ : int = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_A,"log.txt" ) ).exists() )
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( A__ , unittest.TestCase ):
A = CodeGenTokenizer
A = CodeGenTokenizerFast
A = True
A = {'add_prefix_space': True}
A = False
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
SCREAMING_SNAKE_CASE_ : List[str] = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file,"w",encoding="utf-8" ) as fp:
fp.write(json.dumps(_A ) + "\n" )
with open(self.merges_file,"w",encoding="utf-8" ) as fp:
fp.write("\n".join(_A ) )
def __UpperCamelCase ( self : Tuple,**_A : Optional[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname,**_A )
def __UpperCamelCase ( self : Any,**_A : int ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname,**_A )
def __UpperCamelCase ( self : Optional[Any],_A : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "lower newer"
SCREAMING_SNAKE_CASE_ : Dict = "lower newer"
return input_text, output_text
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = CodeGenTokenizer(self.vocab_file,self.merges_file,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : int = "lower newer"
SCREAMING_SNAKE_CASE_ : List[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.tokenize(_A,add_prefix_space=_A )
self.assertListEqual(_A,_A )
SCREAMING_SNAKE_CASE_ : int = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ),_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Any = self.get_rust_tokenizer(add_prefix_space=_A )
SCREAMING_SNAKE_CASE_ : List[str] = "lower newer"
# Testing tokenization
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.tokenize(_A,add_prefix_space=_A )
SCREAMING_SNAKE_CASE_ : Any = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A,_A )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE_ : int = tokenizer.encode(_A,add_special_tokens=_A,add_prefix_space=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = rust_tokenizer.encode(_A,add_special_tokens=_A )
self.assertListEqual(_A,_A )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE_ : int = self.get_rust_tokenizer(add_prefix_space=_A )
SCREAMING_SNAKE_CASE_ : int = tokenizer.encode(_A,add_prefix_space=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = rust_tokenizer.encode(_A )
self.assertListEqual(_A,_A )
# Testing the unknown token
SCREAMING_SNAKE_CASE_ : int = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_A ),_A )
def __UpperCamelCase ( self : Dict,*_A : Union[str, Any],**_A : Optional[int] ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict,_A : Any=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Tuple = self.rust_tokenizer_class.from_pretrained(_A,**_A )
# Simple input
SCREAMING_SNAKE_CASE_ : Optional[int] = "This is a simple input"
SCREAMING_SNAKE_CASE_ : Dict = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_A,tokenizer_r.encode,_A,max_length=_A,padding="max_length" )
# Simple input
self.assertRaises(_A,tokenizer_r.encode_plus,_A,max_length=_A,padding="max_length" )
# Simple input
self.assertRaises(
_A,tokenizer_r.batch_encode_plus,_A,max_length=_A,padding="max_length",)
# Pair input
self.assertRaises(_A,tokenizer_r.encode,_A,max_length=_A,padding="max_length" )
# Pair input
self.assertRaises(_A,tokenizer_r.encode_plus,_A,max_length=_A,padding="max_length" )
# Pair input
self.assertRaises(
_A,tokenizer_r.batch_encode_plus,_A,max_length=_A,padding="max_length",)
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
SCREAMING_SNAKE_CASE_ : Any = "This is a simple input"
SCREAMING_SNAKE_CASE_ : str = ["This is a simple input looooooooong", "This is a simple input"]
SCREAMING_SNAKE_CASE_ : int = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE_ : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_ : str = tokenizer(_A,padding="max_length",max_length=30,return_tensors="np" )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer(_A,padding=_A,truncate=_A,return_tensors="np" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(*_A,padding="max_length",max_length=60,return_tensors="np" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(_A,padding=_A,truncate=_A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = "$$$"
SCREAMING_SNAKE_CASE_ : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=_A,add_bos_token=_A )
SCREAMING_SNAKE_CASE_ : str = "This is a simple input"
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(_A )
self.assertEqual(out_s.input_ids[0],_A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE_ : int = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],_A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
SCREAMING_SNAKE_CASE_ : List[str] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
SCREAMING_SNAKE_CASE_ : int = "\nif len_a > len_b: result = a\nelse: result = b"
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.encode(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.decode(_A,truncate_before_pattern=_A )
self.assertEqual(_A,_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
while b:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = b, a % b
return a
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(lowerCAmelCase , a % b )
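# Worked trace (illustrative): euclidean_gcd_recursive(48, 18) ->
# gcd(18, 48 % 18 = 12) -> gcd(12, 6) -> gcd(6, 0) -> 6.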
def _snake_case ( ):
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
from math import isqrt
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = False
return [i for i in range(2 , lowerCAmelCase ) if is_prime[i]]
def _snake_case ( lowerCAmelCase : int = 1_0**8 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = calculate_prime_numbers(max_number // 2 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : List[Any] = len(lowerCAmelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
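# Worked example (illustrative): for max_number = 30 the primes up to 15
# are [2, 3, 5, 7, 11, 13]; the two-pointer sweep counts the 10 pairs
# p <= q with p * q < 30, i.e. 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.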
if __name__ == "__main__":
print(f'''{solution() = }''')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Dict = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
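# Note (illustrative): _LazyModule defers the heavy torch/tf imports until a
# symbol is first accessed, keeping the top-level package import cheap; when
# a backend is unavailable, the corresponding symbols are simply omitted
# from the lazy import structure above.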
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [0 for i in range(len(lowerCAmelCase ) )]
# initialize interval's left pointer and right pointer
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = 0, 0
for i in range(1 , len(lowerCAmelCase ) ):
# case when current index is inside the interval
if i <= right_pointer:
SCREAMING_SNAKE_CASE_ : List[str] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
SCREAMING_SNAKE_CASE_ : str = min_edge
while go_next(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
z_result[i] += 1
# if the new index's result extends the matched interval further right,
# we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = i, i + z_result[i] - 1
return z_result
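# Worked example (illustrative): z_function("abacaba") returns
# [0, 0, 1, 0, 3, 0, 1] -- z[4] = 3 because "aba" starting at index 4
# matches the prefix "aba".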
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : list[int] , lowerCAmelCase : str ):
"""simple docstring"""
return i + z_result[i] < len(lowerCAmelCase ) and s[z_result[i]] == s[i + z_result[i]]
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
SCREAMING_SNAKE_CASE_ : str = z_function(pattern + input_str )
for val in z_result:
# if the value is at least the length of the pattern string,
# this index is the starting position of a substring
# equal to the pattern string
if val >= len(lowerCAmelCase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Path , lowerCAmelCase : str = None , lowerCAmelCase : str = None , lowerCAmelCase : str = None , ):
"""simple docstring"""
if config_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = question_encoder_name_or_path
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
SCREAMING_SNAKE_CASE_ : List[Any] = RagConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = gen_config
SCREAMING_SNAKE_CASE_ : Optional[Any] = question_encoder_config
SCREAMING_SNAKE_CASE_ : Dict = model_class.from_pretrained_question_encoder_generator(
lowerCAmelCase , lowerCAmelCase , config=lowerCAmelCase )
rag_model.save_pretrained(lowerCAmelCase )
# Sanity check.
model_class.from_pretrained(lowerCAmelCase )
# Save tokenizers.
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
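# Hypothetical invocation (script name, paths and checkpoint choices are
# illustrative only):
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-consolidated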
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__lowerCamelCase : Tuple = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class a__ ( A__ ):
A = 'ernie_m'
A = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Tuple,_A : int = 25_0002,_A : int = 768,_A : int = 12,_A : int = 12,_A : int = 3072,_A : str = "gelu",_A : float = 0.1,_A : float = 0.1,_A : int = 514,_A : float = 0.02,_A : int = 1,_A : float = 1E-05,_A : Any=None,_A : str=False,_A : int=0.0,**_A : int,):
"""simple docstring"""
super().__init__(pad_token_id=_A,**_A )
SCREAMING_SNAKE_CASE_ : Dict = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE_ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Union[str, Any] = classifier_dropout
SCREAMING_SNAKE_CASE_ : str = is_decoder
SCREAMING_SNAKE_CASE_ : str = act_dropout
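# Rough usage sketch (assuming the upstream name ErnieMConfig for the class
# obfuscated as a__ above):
#   config = ErnieMConfig(hidden_size=768, num_hidden_layers=12)
# which reproduces the base-model defaults listed in the signature.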
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
SCREAMING_SNAKE_CASE_ : Any = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_A ),_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4 )
self.assertTrue(np.allclose(transpose(_A ),x.transpose() ) )
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),x.transpose((1, 2, 0) ) ) )
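# Note (illustrative): transpose/reshape/squeeze/expand_dims from
# transformers.utils dispatch on the input tensor type, so the numpy checks
# above are repeated below against torch, tensorflow and jax tensors.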
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A ),np.asarray(transpose(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),np.asarray(transpose(_A,axes=(1, 2, 0) ) ) ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.reshape(_A,(4, 3) ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.reshape(_A,(12, 5) ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Any = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : int = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.asarray(reshape(_A,(4, 3) ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.asarray(reshape(_A,(12, 5) ) ) ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(1,3,4 )
self.assertTrue(np.allclose(squeeze(_A ),np.squeeze(_A ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.squeeze(_A,axis=2 ) ) )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A ),np.asarray(squeeze(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.asarray(squeeze(_A,axis=2 ) ) ) )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.expand_dims(_A,axis=1 ) ) )
@require_torch
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.asarray(expand_dims(_A,axis=1 ) ) ) )
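# Illustrative note (added for clarity; not part of the original test file): the
# framework-agnostic helpers exercised above dispatch on the input type, so the
# same call works for NumPy arrays and torch/tf/jax tensors alike. Similarly,
# flatten_dict collapses nested dicts into dot-separated keys, e.g.
#   flatten_dict({"a": {"b": {"c": 1}}}) == {"a.b.c": 1}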
| 18 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(self, vocab_size=10_0000, hidden_size=4096, intermediate_size=1_1008, num_hidden_layers=32, num_attention_heads=32, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, tie_word_embeddings=False, use_memory_efficient_attention=True, hidden_dropout_prob=0.1, attention_dropout_prob=0.1, use_stable_embedding=True, shared_input_output_embedding=True, rope_scaling=None, **kwargs):
        # Parameter names restored from the assignment order below; the upstream
        # Open-Llama defaults are assumed where the original names were lost.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # The misspelled kwarg is kept for backward compatibility with older configs.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings, **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration dictionary."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
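# Illustrative usage (added for clarity; not part of the original file):
#   OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # validates and passes
#   OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})  # raises ValueError (factor must be > 1)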
| 18 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__lowerCamelCase : List[Any] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__lowerCamelCase : int = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters that byte-level BPE cannot represent directly.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
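# Worked examples for the two helpers above (added for illustration):
#   bytes_to_unicode()[ord(" ")] == "Ġ"   # the space byte maps to a printable stand-in
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}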
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
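# Illustrative walk-through of the bpe() merge loop above (added for clarity):
# with toy ranks {("l", "o"): 0, ("lo", "w"): 1}, bpe("low") evolves as
#   ("l", "o", "w") -> ("lo", "w") -> ("low",)
# and returns "low" once no ranked merge remains.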
| 18 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : str = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__ ( A__ , unittest.TestCase ):
A = ReformerTokenizer
A = ReformerTokenizerFast
A = True
A = False
A = True
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ : int = ReformerTokenizer(_A,keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = "<s>"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ),_A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ),_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],"<unk>" )
self.assertEqual(vocab_keys[1],"<s>" )
self.assertEqual(vocab_keys[-1],"j" )
self.assertEqual(len(_A ),1000 )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size,1000 )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : int = "I was born in 92000, and this is falsé."
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.tokenize(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A,_A )
SCREAMING_SNAKE_CASE_ : int = tokenizer.encode(_A,add_special_tokens=_A )
SCREAMING_SNAKE_CASE_ : Tuple = rust_tokenizer.encode(_A,add_special_tokens=_A )
self.assertListEqual(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(_A )
SCREAMING_SNAKE_CASE_ : str = rust_tokenizer.encode(_A )
self.assertListEqual(_A,_A )
def __UpperCamelCase ( self : str,_A : List[str]=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : List[str] = self.rust_tokenizer_class.from_pretrained(_A,**_A )
# Simple input
SCREAMING_SNAKE_CASE_ : int = "This is a simple input"
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = ("This is a simple input", "This is a pair")
SCREAMING_SNAKE_CASE_ : List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(_A,tokenizer_r.encode,_A,max_length=_A,padding="max_length" )
# Simple input
self.assertRaises(_A,tokenizer_r.encode_plus,_A,max_length=_A,padding="max_length" )
# Simple input
self.assertRaises(
_A,tokenizer_r.batch_encode_plus,_A,max_length=_A,padding="max_length",)
# Pair input
self.assertRaises(_A,tokenizer_r.encode,_A,max_length=_A,padding="max_length" )
# Pair input
self.assertRaises(_A,tokenizer_r.encode_plus,_A,max_length=_A,padding="max_length" )
# Pair input
self.assertRaises(
_A,tokenizer_r.batch_encode_plus,_A,max_length=_A,padding="max_length",)
def __UpperCamelCase ( self : int ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ReformerTokenizer(_A,keep_accents=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(_A,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ),[285, 46, 10, 170, 382],)
SCREAMING_SNAKE_CASE_ : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_A,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
],)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],)
SCREAMING_SNAKE_CASE_ : int = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
],)
@cached_property
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "Hello World!"
SCREAMING_SNAKE_CASE_ : Optional[int] = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(_A,self.big_tokenizer.encode(_A ) )
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
SCREAMING_SNAKE_CASE_ : str = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(_A,self.big_tokenizer.encode(_A ) )
@require_torch
@slow
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
SCREAMING_SNAKE_CASE_ : Any = list(self.big_tokenizer.get_vocab().keys() )[:10]
SCREAMING_SNAKE_CASE_ : str = " ".join(_A )
SCREAMING_SNAKE_CASE_ : str = self.big_tokenizer.encode_plus(_A,return_tensors="pt" )
SCREAMING_SNAKE_CASE_ : Any = self.big_tokenizer.batch_encode_plus([sequence, sequence],return_tensors="pt" )
SCREAMING_SNAKE_CASE_ : List[str] = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
SCREAMING_SNAKE_CASE_ : int = encoded_sequence["input_ids"].shape
SCREAMING_SNAKE_CASE_ : Any = ReformerModel(_A )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_A )
model(**_A )
@slow
def __UpperCamelCase ( self : str ):
"""simple docstring"""
        # fmt: off
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=_A,model_name="google/reformer-crime-and-punishment",revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",padding=_A,sequences=_A,)
| 18 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
| 18 | 1 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point for the ``transformers-cli`` console script."""
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
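# Example invocation (assumed typical usage; added for illustration):
#   $ transformers-cli env
# resolves to EnvironmentCommand via the subparser's registered factory and
# prints environment details useful for bug reports.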
if __name__ == "__main__":
main()
| 18 |
def binary_recursive(decimal: int) -> str:
    """Recursively build the binary digits of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and return the value as a "0b"/"-0b" prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
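# Worked examples (added for illustration):
#   main("23")  == "0b10111"
#   main("-23") == "-0b10111"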
if __name__ == "__main__":
from doctest import testmod
testmod()
| 18 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : bool = True , lowerCAmelCase : float = math.inf , lowerCAmelCase : float = -math.inf , lowerCAmelCase : float = math.inf , lowerCAmelCase : float = -math.inf , lowerCAmelCase : bool = False , lowerCAmelCase : float = 1_0_0 , lowerCAmelCase : float = 0.01 , lowerCAmelCase : float = 1 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = False
SCREAMING_SNAKE_CASE_ : List[Any] = search_prob
SCREAMING_SNAKE_CASE_ : Dict = start_temperate
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : Any = None
while not search_end:
SCREAMING_SNAKE_CASE_ : str = current_state.score()
if best_state is None or current_score > best_state.score():
SCREAMING_SNAKE_CASE_ : str = current_state
scores.append(lowerCAmelCase )
iterations += 1
SCREAMING_SNAKE_CASE_ : List[str] = None
SCREAMING_SNAKE_CASE_ : Optional[Any] = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
SCREAMING_SNAKE_CASE_ : Optional[Any] = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
SCREAMING_SNAKE_CASE_ : int = neighbors.pop(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
SCREAMING_SNAKE_CASE_ : Optional[Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
SCREAMING_SNAKE_CASE_ : Union[str, Any] = picked_neighbor
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
SCREAMING_SNAKE_CASE_ : Tuple = picked_neighbor
SCREAMING_SNAKE_CASE_ : List[str] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
SCREAMING_SNAKE_CASE_ : List[Any] = True
else:
SCREAMING_SNAKE_CASE_ : str = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
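# Numeric illustration of the acceptance rule above (added for clarity): a
# worsening move with change = -2 is accepted with probability
# e^(-2 / 100) ≈ 0.98 at current_temp = 100, but only e^(-2 / 1) ≈ 0.14 at
# current_temp = 1, so cooling makes downhill moves increasingly unlikely.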
if __name__ == "__main__":
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Any ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
__lowerCamelCase : List[str] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : List[Any] = simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
__lowerCamelCase : Any = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : int = simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
f'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
__lowerCamelCase : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : str = simulated_annealing(prob, find_max=False, visualization=True)
print(
'''The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'''{local_min.score()}'''
)
__lowerCamelCase : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
__lowerCamelCase : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
'''The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '''
f'''{local_min.score()}'''
)
| 18 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Union[str, Any] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor''']
__lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
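# Note (illustrative; not part of the original file): _LazyModule defers the
# heavy torch/vision imports, so e.g.
#   from transformers.models.chinese_clip import ChineseCLIPProcessor
# only materializes processing_chinese_clip on first attribute access.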
| 18 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__ ( unittest.TestCase ):
@property
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = UNetaDModel(
block_out_channels=(32, 64),layers_per_block=2,sample_size=32,in_channels=3,out_channels=3,down_block_types=("DownBlock2D", "AttnDownBlock2D"),up_block_types=("AttnUpBlock2D", "UpBlock2D"),)
return model
@property
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : int = VQModel(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=3,)
return model
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,)
return CLIPTextModel(_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.dummy_uncond_unet
SCREAMING_SNAKE_CASE_ : Optional[Any] = DDIMScheduler()
SCREAMING_SNAKE_CASE_ : Optional[int] = self.dummy_vq_model
SCREAMING_SNAKE_CASE_ : Optional[int] = LDMPipeline(unet=_A,vqvae=_A,scheduler=_A )
ldm.to(_A )
ldm.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Dict = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = ldm(generator=_A,num_inference_steps=2,output_type="numpy" ).images
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = ldm(generator=_A,num_inference_steps=2,output_type="numpy",return_dict=_A )[0]
SCREAMING_SNAKE_CASE_ : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
SCREAMING_SNAKE_CASE_ : int = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(_A )
ldm.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = ldm(generator=_A,num_inference_steps=5,output_type="numpy" ).images
SCREAMING_SNAKE_CASE_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
SCREAMING_SNAKE_CASE_ : Optional[int] = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
SCREAMING_SNAKE_CASE_ : List[Any] = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 18 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    """Rename flat T5X parameter keys in place to the HF SwitchTransformers layout."""
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_pattern = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_pattern, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encoder_decoder_pattern = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_pattern, key):
            groups = re.match(encoder_decoder_pattern, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_key = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_key] = expert_weights[idx]
                print(f"{key} -> {new_key}")
            s_dict.pop(key)

    return s_dict
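# Worked example of the mapping above (illustrative; assumes typical T5X names):
#   "encoder/layers_0/attention/key/kernel"
#     -> "encoder/block/0/layer/0/SelfAttention/k/kernel"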
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config ( gin_file , num_experts ):
    """simple docstring"""
    import regex as re
    with open(gin_file , "r" ) as f:
        raw_gin = f.read()
    regex_match = re.findall(R"(.*) = ([0-9.]*)" , raw_gin )
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if "." in value else int(value )
    activation = re.findall(R"(.*activations) = \(\'(.*)\',\)" , raw_gin )[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args )
    return config
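# Hedged example of the gin parsing above (the sample line is illustrative): a gin
# entry such as `NUM_HEADS = 12` matches the (.*) = ([0-9.]*) pattern and, via
# GIN_TO_CONFIG_MAPPING, lands in the SwitchTransformersConfig as num_heads=12.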
def convert_flax_checkpoint_to_pytorch ( flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
    """simple docstring"""
    print(f'Loading flax weights from : {flax_checkpoint_path}' )
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        config = convert_gin_to_config(gin_file , num_experts )
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file )
    pt_model = SwitchTransformersForConditionalGeneration(config )
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params , sep="/" )
    flax_params = rename_keys(flax_params )
    flax_params = unflatten_dict(flax_params , sep="/" )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
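# Hedged usage sketch (the script name and paths below are hypothetical, not from
# the source; the flags themselves are the ones defined above):
#   python convert_switch_checkpoint.py \
#       --switch_t5x_checkpoint_path /tmp/switch_base_8/checkpoint_500000 \
#       --gin_file /tmp/switch_base_8/config.gin \
#       --pytorch_dump_folder_path /tmp/switch_base_8_pt --num_experts 8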
| 18 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
__lowerCamelCase : int = False
__lowerCamelCase : Any = False
def _snake_case ( lowerCAmelCase : Namespace ):
"""simple docstring"""
return TrainCommand(lowerCAmelCase )
class a__ ( A__ ):
@staticmethod
def __UpperCamelCase ( _A : ArgumentParser ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parser.add_parser("train",help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data",type=_A,required=_A,help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",)
train_parser.add_argument(
"--column_label",type=_A,default=0,help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text",type=_A,default=1,help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id",type=_A,default=2,help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row",action="store_true",help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data",type=_A,default="",help="path to validation dataset." )
train_parser.add_argument(
"--validation_split",type=_A,default=0.1,help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",)
train_parser.add_argument("--output",type=_A,default="./",help="path to saved the trained model." )
train_parser.add_argument(
"--task",type=_A,default="text_classification",help="Task to train the model on." )
train_parser.add_argument(
"--model",type=_A,default="bert-base-uncased",help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size",type=_A,default=32,help="Batch size for training." )
train_parser.add_argument("--valid_batch_size",type=_A,default=64,help="Batch size for validation." )
train_parser.add_argument("--learning_rate",type=_A,default=3E-5,help="Learning rate." )
train_parser.add_argument("--adam_epsilon",type=_A,default=1E-08,help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=_A )
def __init__( self : Optional[int],_A : Namespace ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = logging.get_logger("transformers-cli/training" )
SCREAMING_SNAKE_CASE_ : Any = "tf" if is_tf_available() else "torch"
os.makedirs(args.output,exist_ok=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = args.output
SCREAMING_SNAKE_CASE_ : Optional[int] = args.column_label
SCREAMING_SNAKE_CASE_ : Optional[int] = args.column_text
SCREAMING_SNAKE_CASE_ : int = args.column_id
self.logger.info(F'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE_ : str = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'Loading dataset from {args.train_data}' )
SCREAMING_SNAKE_CASE_ : Dict = Processor.create_from_csv(
args.train_data,column_label=args.column_label,column_text=args.column_text,column_id=args.column_id,skip_first_row=args.skip_first_row,)
SCREAMING_SNAKE_CASE_ : Tuple = None
if args.validation_data:
self.logger.info(F'Loading validation dataset from {args.validation_data}' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Processor.create_from_csv(
args.validation_data,column_label=args.column_label,column_text=args.column_text,column_id=args.column_id,skip_first_row=args.skip_first_row,)
SCREAMING_SNAKE_CASE_ : str = args.validation_split
SCREAMING_SNAKE_CASE_ : int = args.train_batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = args.valid_batch_size
SCREAMING_SNAKE_CASE_ : Any = args.learning_rate
SCREAMING_SNAKE_CASE_ : Union[str, Any] = args.adam_epsilon
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
raise NotImplementedError
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
self.pipeline.fit(
self.train_dataset,validation_data=self.valid_dataset,validation_split=self.validation_split,learning_rate=self.learning_rate,adam_epsilon=self.adam_epsilon,train_batch_size=self.train_batch_size,valid_batch_size=self.valid_batch_size,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 18 | from math import factorial, radians
def maclaurin_sin ( angle_in_degrees : float , accuracy : int = 1_8 , rounded_values_count : int = 1_0 ):
    """simple docstring"""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__('''doctest''').testmod()
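# Hedged usage sketch (tolerances are ours; assumes the Maclaurin series above):
assert abs(maclaurin_sin(30.0) - 0.5) < 1e-9  # sin(30 degrees) = 0.5
assert abs(maclaurin_sin(90.0) - 1.0) < 1e-9  # sin(90 degrees) = 1.0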
| 18 | 1 |
import random
from typing import Any
def fisher_yates_shuffle ( data : list ):
    """simple docstring"""
    for _ in range(len(data ) ):
        a = random.randint(0 , len(data ) - 1 )
        b = random.randint(0 , len(data ) - 1 )
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
integers = [0, 1, 2, 3, 4, 5, 6, 7]
strings = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
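# Hedged note: the shuffle above applies len(data) random transpositions, which is
# not the classic Fisher-Yates procedure and is not exactly uniform. A minimal
# sketch of the textbook variant (the name `classic_fisher_yates` is ours):
def classic_fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # pick a partner from the still-unfixed prefix [0, i]
        data[i], data[j] = data[j], data[i]
    return data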
| 18 | from functools import lru_cache
@lru_cache
def factorial ( num : int ):
    """simple docstring"""
    if num < 0:
        raise ValueError("Number should not be negative." )
    return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
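# Hedged usage sketch (assumes the memoized factorial above): repeated calls reuse
# results cached by @lru_cache.
assert factorial(5) == 120
assert factorial(0) == 1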
| 18 | 1 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main ( ):
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path" , type=str , default="data/dump.txt" , help="The path to the data." )
    parser.add_argument("--tokenizer_type" , type=str , default="bert" , choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name" , type=str , default="bert-base-uncased" , help="The tokenizer to use." )
    parser.add_argument("--dump_file" , type=str , default="data/dump" , help="The dump file prefix." )
    args = parser.parse_args()
    logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(f'Loading text from {args.file_path}' )
    with open(args.file_path , "r" , encoding="utf8" ) as fp:
        data = fp.readlines()
    logger.info("Start encoding" )
    logger.info(f'{len(data )} examples to process.' )
    rslt = []
    iter = 0
    interval = 1_0_0_0_0
    start = time.time()
    for text in data:
        text = f'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
            start = time.time()
    logger.info("Finished binarization" )
    logger.info(f'{len(data )} examples processed.' )
    dp_file = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 1_6):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f'Dump to {dp_file}' )
    with open(dp_file , "wb" ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 18 | from collections import defaultdict
def dfs ( start : int ):
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree ( ):
    """simple docstring"""
    dfs(1 )
if __name__ == "__main__":
n, m = 10, 9
tree = defaultdict(list)
visited: dict[int, bool] = {}
cuts: list[int] = []
count = 0  # unused, kept from the original layout
edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
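# Hedged note: dfs() returns each subtree's size and records vertices whose subtree
# is even-sized; every such vertex except the root can be severed from its parent,
# hence len(cuts) - 1. For the sample tree above this prints 2.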
| 18 | 1 |
import requests
giphy_api_key = '''YOUR API KEY'''
def get_gifs ( query : str , api_key : str = giphy_api_key ):
    """simple docstring"""
    formatted_query = "+".join(query.split() )
    url = f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
    gifs = requests.get(url ).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
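# Hedged note: the endpoint above is Giphy's real v1 search API, but a valid key
# must replace giphy_api_key before get_gifs() returns any results; each returned
# item's "url" field links to the GIF's page.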
| 18 | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main ( ):
    """simple docstring"""
    parser = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="accelerate command helpers" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
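# Hedged usage note: this entry point backs subcommands such as `accelerate config`,
# `accelerate env`, `accelerate launch script.py` and `accelerate test`, each of
# which is registered by the parser functions above.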
| 18 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : Tuple = get_tests_dir('''fixtures/test_sentencepiece.model''')
__lowerCamelCase : Optional[Any] = {'''target_lang''': '''fi''', '''source_lang''': '''en'''}
__lowerCamelCase : Any = '''>>zh<<'''
ORG_NAME = '''Helsinki-NLP/'''
if is_torch_available():
__lowerCamelCase : List[Any] = '''pt'''
elif is_tf_available():
__lowerCamelCase : List[str] = '''tf'''
else:
__lowerCamelCase : Dict = '''jax'''
@require_sentencepiece
class a__ ( A__ , unittest.TestCase ):
A = MarianTokenizer
A = False
A = True
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ : Dict = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
SCREAMING_SNAKE_CASE_ : List[str] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : str = Path(self.tmpdirname )
save_json(_A,save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(_A,save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(_A,save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(_A,save_dir / VOCAB_FILES_NAMES["target_spm"] )
SCREAMING_SNAKE_CASE_ : int = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any],**_A : Optional[Any] ):
"""simple docstring"""
return MarianTokenizer.from_pretrained(self.tmpdirname,**_A )
def __UpperCamelCase ( self : Union[str, Any],_A : List[str] ):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = "</s>"
SCREAMING_SNAKE_CASE_ : str = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ),_A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ),_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0],"</s>" )
self.assertEqual(vocab_keys[1],"<unk>" )
self.assertEqual(vocab_keys[-1],"<pad>" )
self.assertEqual(len(_A ),9 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size,9 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
SCREAMING_SNAKE_CASE_ : Dict = en_de_tokenizer(["I am a small frog"],return_tensors=_A )
self.assertIsInstance(_A,_A )
SCREAMING_SNAKE_CASE_ : str = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(_A,batch.input_ids[0] )
SCREAMING_SNAKE_CASE_ : str = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = [x.name for x in Path(_A ).glob("*" )]
self.assertIn("source.spm",_A )
MarianTokenizer.from_pretrained(_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = tok(
["I am a small frog" * 1000, "I am a small frog"],padding=_A,truncation=_A,return_tensors=_A )
self.assertIsInstance(_A,_A )
self.assertEqual(batch.input_ids.shape,(2, 512) )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = tok(["I am a tiny frog", "I am a small frog"],padding=_A,return_tensors=_A )
self.assertIsInstance(_A,_A )
self.assertEqual(batch_smaller.input_ids.shape,(2, 10) )
@slow
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A,model_name="Helsinki-NLP/opus-mt-en-de",revision="1a8c2263da11e68e50938f97e10cd57820bd504c",decode_kwargs={"use_source_tokenizer": True},)
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
SCREAMING_SNAKE_CASE_ : Tuple = "Tämä on testi"
SCREAMING_SNAKE_CASE_ : List[str] = "This is a test"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [76, 7, 2047, 2]
SCREAMING_SNAKE_CASE_ : str = [69, 12, 11, 940, 2]
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(_A ).input_ids
self.assertListEqual(_A,_A )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer(text_target=_A ).input_ids
self.assertListEqual(_A,_A )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.decode(_A,skip_special_tokens=_A )
self.assertEqual(_A,_A )
| 18 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( A__ ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
def __init__( self : List[str],_A : int=None,_A : int=None,_A : int=True,_A : List[Any]="[UNK]",_A : Tuple="[SEP]",_A : List[Any]="[PAD]",_A : Optional[int]="[CLS]",_A : Optional[Any]="[MASK]",_A : Optional[int]=True,_A : List[str]=None,**_A : List[Any],):
"""simple docstring"""
super().__init__(
_A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase",_A ) != do_lower_case
or pre_tok_state.get("strip_accents",_A ) != strip_accents
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
SCREAMING_SNAKE_CASE_ : List[str] = strip_accents
SCREAMING_SNAKE_CASE_ : str = pre_tok_class(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = do_lower_case
def __getstate__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = d
SCREAMING_SNAKE_CASE_ : List[str] = self.__dict__["_tokenizer"].get_vocab()
SCREAMING_SNAKE_CASE_ : Any = PreTokenizer.custom(JiebaPreTokenizer(_A ) )
def __UpperCamelCase ( self : Union[str, Any],_A : List[Any],_A : str=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
def __UpperCamelCase ( self : int,_A : Optional[int],_A : List[Any]=None,_A : Tuple=None,_A : str=False,**_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertPreTokenizer()
return super().save_pretrained(_A,_A,_A,_A,**_A )
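# Hedged note on the __getstate__/__setstate__ pair above: a custom Python
# pre-tokenizer (the Jieba one) cannot be pickled, so the state swap substitutes a
# plain BertPreTokenizer before serialization and rebuilds the Jieba pre-tokenizer
# from the tokenizer's vocabulary when the object is unpickled.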
| 18 | 1 |
from __future__ import annotations
def is_palindrome ( n : int | str ):
    """simple docstring"""
    n = str(n )
    return n == n[::-1]
def solution ( limit : int = 1_0_0_0_0_0_0 ):
    """simple docstring"""
    total = 0
    for i in range(1 , limit ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
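# Hedged worked example: 585 is counted because both base representations read the
# same backwards: str(585) == "585" and bin(585).split("b")[1] == "1001001001".
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])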
| 18 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
A = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
| 18 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class a__ ( A__ ):
A = 'decision_transformer'
A = ['past_key_values']
A = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int,_A : Optional[int]=17,_A : List[str]=4,_A : Any=128,_A : Optional[Any]=4096,_A : Tuple=True,_A : Any=1,_A : str=1024,_A : int=3,_A : Optional[Any]=1,_A : int=None,_A : List[str]="relu",_A : List[str]=0.1,_A : Any=0.1,_A : Optional[Any]=0.1,_A : Union[str, Any]=1E-5,_A : Tuple=0.02,_A : int=True,_A : List[str]=True,_A : List[Any]=5_0256,_A : Any=5_0256,_A : Optional[Any]=False,_A : str=False,**_A : str,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = state_dim
SCREAMING_SNAKE_CASE_ : Union[str, Any] = act_dim
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[int] = max_ep_len
SCREAMING_SNAKE_CASE_ : Optional[int] = action_tanh
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = n_positions
SCREAMING_SNAKE_CASE_ : Tuple = n_layer
SCREAMING_SNAKE_CASE_ : Union[str, Any] = n_head
SCREAMING_SNAKE_CASE_ : str = n_inner
SCREAMING_SNAKE_CASE_ : List[Any] = activation_function
SCREAMING_SNAKE_CASE_ : int = resid_pdrop
SCREAMING_SNAKE_CASE_ : Dict = embd_pdrop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = attn_pdrop
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = scale_attn_weights
SCREAMING_SNAKE_CASE_ : Optional[int] = use_cache
SCREAMING_SNAKE_CASE_ : Optional[int] = scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE_ : int = reorder_and_upcast_attn
SCREAMING_SNAKE_CASE_ : Any = bos_token_id
SCREAMING_SNAKE_CASE_ : Tuple = eos_token_id
super().__init__(bos_token_id=_A,eos_token_id=_A,**_A )
| 18 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset ( dataset , expected_features ):
    """simple docstring"""
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _check_text_datasetdict ( dataset_dict , expected_features , splits=("train",) ):
    """simple docstring"""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 18 | 1 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__lowerCamelCase : int = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class a__ ( unittest.TestCase ):
@classmethod
def __UpperCamelCase ( cls : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = TOKEN
HfFolder.save_token(_A )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token,repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = BertConfig(
vocab_size=99,hidden_size=32,num_hidden_layers=5,num_attention_heads=4,intermediate_size=37 )
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxBertModel(_A )
model.push_to_hub("test-model-flax",use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' )
SCREAMING_SNAKE_CASE_ : str = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A,1E-3,msg=F'{key} not identical' )
# Reset repo
delete_repo(token=self._token,repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_A,repo_id="test-model-flax",push_to_hub=_A,use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : Any = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' )
SCREAMING_SNAKE_CASE_ : str = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A,1E-3,msg=F'{key} not identical' )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = BertConfig(
vocab_size=99,hidden_size=32,num_hidden_layers=5,num_attention_heads=4,intermediate_size=37 )
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxBertModel(_A )
model.push_to_hub("valid_org/test-model-flax-org",use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : Tuple = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
SCREAMING_SNAKE_CASE_ : str = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ : Optional[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A,1E-3,msg=F'{key} not identical' )
# Reset repo
delete_repo(token=self._token,repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_A,repo_id="valid_org/test-model-flax-org",push_to_hub=_A,use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : int = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
SCREAMING_SNAKE_CASE_ : Dict = flatten_dict(unfreeze(model.params ) )
SCREAMING_SNAKE_CASE_ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
SCREAMING_SNAKE_CASE_ : List[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_A,1E-3,msg=F'{key} not identical' )
def check_models_equal ( model1 , model2 ):
    """simple docstring"""
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params )
    flat_params_2 = flatten_dict(model2.params )
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key] ) ) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxBertModel(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A,_A ) )
with self.assertRaises(_A ):
SCREAMING_SNAKE_CASE_ : str = FlaxBertModel.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Tuple = FlaxBertModel.from_pretrained(_A,subfolder=_A )
self.assertTrue(check_models_equal(_A,_A ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
SCREAMING_SNAKE_CASE_ : Dict = FlaxBertModel(_A )
SCREAMING_SNAKE_CASE_ : str = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_A,_A ),max_shard_size="10KB" )
with self.assertRaises(_A ):
SCREAMING_SNAKE_CASE_ : int = FlaxBertModel.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : str = FlaxBertModel.from_pretrained(_A,subfolder=_A )
self.assertTrue(check_models_equal(_A,_A ) )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = "bert"
SCREAMING_SNAKE_CASE_ : Any = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(_A ):
SCREAMING_SNAKE_CASE_ : Dict = FlaxBertModel.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : List[str] = FlaxBertModel.from_pretrained(_A,subfolder=_A )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "bert"
SCREAMING_SNAKE_CASE_ : Tuple = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(_A ):
SCREAMING_SNAKE_CASE_ : Dict = FlaxBertModel.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : str = FlaxBertModel.from_pretrained(_A,subfolder=_A )
self.assertIsNotNone(_A )
| 18 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape ( tensor_list ):
    """simple docstring"""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
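# Editor's note: the 16x16 dummy latent is upscaled 2x to 32x32, and the VAE
# decode adds 8x spatial upsampling, hence the expected (1, 256, 256, 3) image.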
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # schedulers without a sigma schedule are not supported by this pipeline
continue
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 18 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class a__ ( unittest.TestCase , A__ ):
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = load_tool("text-to-speech" )
self.tool.setup()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = self.tool("hey" )
SCREAMING_SNAKE_CASE_ : Optional[int] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3],torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ),) )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = self.tool("hey" )
SCREAMING_SNAKE_CASE_ : Any = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3],torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ),) )
| 18 | from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class a__ ( A__ ):
A = 'perceiver'
def __init__( self : List[Any],_A : Tuple=256,_A : str=1280,_A : List[Any]=768,_A : Union[str, Any]=1,_A : Union[str, Any]=26,_A : List[str]=8,_A : List[Any]=8,_A : List[Any]=None,_A : List[Any]=None,_A : Union[str, Any]="kv",_A : Any=1,_A : int=1,_A : Dict="gelu",_A : Any=0.1,_A : int=0.02,_A : int=1E-12,_A : Any=True,_A : Optional[Any]=262,_A : List[Any]=2048,_A : str=56,_A : Optional[int]=[368, 496],_A : Dict=16,_A : Tuple=1920,_A : List[Any]=16,_A : str=[1, 16, 224, 224],**_A : Optional[Any],):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Dict = num_latents
SCREAMING_SNAKE_CASE_ : List[Any] = d_latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = num_blocks
SCREAMING_SNAKE_CASE_ : List[Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE_ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = num_cross_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = qk_channels
SCREAMING_SNAKE_CASE_ : Any = v_channels
SCREAMING_SNAKE_CASE_ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE_ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE_ : Any = cross_attention_widening_factor
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE_ : Dict = image_size
# flow attributes
SCREAMING_SNAKE_CASE_ : List[Any] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE_ : str = num_frames
SCREAMING_SNAKE_CASE_ : Any = audio_samples_per_frame
SCREAMING_SNAKE_CASE_ : Tuple = samples_per_patch
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_shape
class a__ ( A__ ):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 1 |
from __future__ import annotations
__lowerCamelCase : Tuple = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
__lowerCamelCase : str = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def _snake_case ( lowerCAmelCase : list[float] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : List[str] = len(lowerCAmelCase )
for i in range(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : float = -1
for j in range(i + 1 , lowerCAmelCase ):
if arr[i] < arr[j]:
SCREAMING_SNAKE_CASE_ : Any = arr[j]
break
result.append(lowerCAmelCase )
return result
def _snake_case ( lowerCAmelCase : list[float] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = []
for i, outer in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
SCREAMING_SNAKE_CASE_ : List[str] = inner
break
result.append(lowerCAmelCase )
return result
def _snake_case ( lowerCAmelCase : list[float] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = len(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : list[float] = []
SCREAMING_SNAKE_CASE_ : list[float] = [-1] * arr_size
for index in reversed(range(lowerCAmelCase ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = stack[-1]
stack.append(arr[index] )
return result
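# Editor's sketch (self-contained restatement of the stack-based variant
# above, which is O(n): each element is pushed and popped at most once):
def _next_greatest_element_sketch(arr: list[float]) -> list[float]:
    result: list[float] = [-1] * len(arr)
    stack: list[float] = []
    for index in reversed(range(len(arr))):
        # drop stack entries that can no longer be anyone's "next greater"
        while stack and stack[-1] <= arr[index]:
            stack.pop()
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
# e.g. _next_greatest_element_sketch([2, 1, 5, 3]) == [5, 5, -1, -1]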
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__lowerCamelCase : Dict = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 18 | from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a__ ( yaml.SafeLoader ):
def __UpperCamelCase ( self : str,_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = [self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(_A ) if isinstance(_A,_A ) else key for key in keys]
SCREAMING_SNAKE_CASE_ : Optional[int] = Counter(_A )
SCREAMING_SNAKE_CASE_ : Tuple = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
def __UpperCamelCase ( self : Tuple,_A : Dict,_A : List[Any]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = super().construct_mapping(_A,deep=_A )
self._check_no_duplicates_on_constructed_node(_A )
return mapping
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_ : List[Any] = full_content[1:].index("---" ) + 1
SCREAMING_SNAKE_CASE_ : int = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCAmelCase )
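# Editor's note: the function above splits a README into YAML front matter and
# body. When the file opens with "---" and a closing "---" follows, it returns
# (yaml_block, rest); otherwise it returns (None, full_text). The loader class
# above it exists to reject duplicate YAML keys that yaml.safe_load would
# otherwise silently collapse.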
class a__ ( A__ ):
# class attributes
A = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def __UpperCamelCase ( cls : Any,_A : Path ):
"""simple docstring"""
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_A )
else:
return cls()
def __UpperCamelCase ( self : Dict,_A : Path ):
"""simple docstring"""
if path.exists():
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ : int = readme_file.read()
else:
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : int = self._to_readme(_A )
with open(_A,"w",encoding="utf-8" ) as readme_file:
readme_file.write(_A )
def __UpperCamelCase ( self : Optional[int],_A : Optional[str] = None ):
"""simple docstring"""
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = _split_yaml_from_readme(_A )
SCREAMING_SNAKE_CASE_ : Tuple = "---\n" + self.to_yaml_string() + "---\n" + content
else:
SCREAMING_SNAKE_CASE_ : Dict = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def __UpperCamelCase ( cls : Dict,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = yaml.load(_A,Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_ : Any = {
(key.replace("-","_" ) if key.replace("-","_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace("_","-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
},sort_keys=_A,allow_unicode=_A,encoding="utf-8",).decode("utf-8" )
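# Editor's note: the to/from-YAML conversions above swap dashes for
# underscores (and back) because Hub metadata uses dashed keys such as
# "train-eval-index" while the Python attributes use underscores;
# _FIELDS_WITH_DASHES lists the keys that need the swap.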
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__lowerCamelCase : List[Any] = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__lowerCamelCase : Dict = ap.parse_args()
__lowerCamelCase : List[Any] = Path(args.readme_filepath)
__lowerCamelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 18 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCamelCase : List[str] = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | from __future__ import annotations
from math import pi, sqrt
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
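# Editor's sketch of the formula above, f = 1 / (2 * pi * sqrt(L * C)),
# reusing the math imports at the top of this module:
def _resonant_frequency_sketch(inductance: float, capacitance: float) -> float:
    return 1 / (2 * pi * sqrt(inductance * capacitance))
# e.g. _resonant_frequency_sketch(10, 5) ~= 0.0225079 Hz (henries and farads)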
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a__ ( A__ ):
A = ['image_processor', 'tokenizer']
A = 'CLIPImageProcessor'
A = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self : Optional[int],_A : Optional[int]=None,_A : str=None,**_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",_A,)
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE_ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_A,_A )
def __call__( self : Optional[Any],_A : int=None,_A : str=None,_A : List[Any]=None,**_A : Optional[int] ):
"""simple docstring"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(_A,return_tensors=_A,**_A )
if images is not None:
SCREAMING_SNAKE_CASE_ : str = self.image_processor(_A,return_tensors=_A,**_A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE_ : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A ),tensor_type=_A )
def __UpperCamelCase ( self : Dict,*_A : int,**_A : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_A,**_A )
def __UpperCamelCase ( self : Union[str, Any],*_A : str,**_A : Any ):
"""simple docstring"""
return self.tokenizer.decode(*_A,**_A )
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
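# Editor's note: __call__ above follows the usual CLIP-style processor
# contract: text-only returns the tokenizer encoding, image-only wraps the
# pixel values in a BatchEncoding, and text plus images attaches the
# pixel_values onto the text encoding before returning it.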
| 18 | def _snake_case ( lowerCAmelCase : list ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = len(lowerCAmelCase )
for i in range(1 , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = collection[i]
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Tuple = i - 1
while low <= high:
SCREAMING_SNAKE_CASE_ : int = (low + high) // 2
if val < collection[mid]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = mid - 1
else:
SCREAMING_SNAKE_CASE_ : Tuple = mid + 1
for j in range(lowerCAmelCase , lowerCAmelCase , -1 ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = collection[j - 1]
SCREAMING_SNAKE_CASE_ : int = val
return collection
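# Editor's sketch (self-contained, readable-name restatement of the function
# above): binary search finds the insertion slot in O(log n) comparisons, but
# the element shifts keep the worst case at O(n^2) moves.
def _binary_insertion_sort_sketch(items: list) -> list:
    for i in range(1, len(items)):
        val = items[i]
        low, high = 0, i - 1
        while low <= high:  # binary-search the slot for val in items[:i]
            mid = (low + high) // 2
            if val < items[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):  # shift the tail one step right
            items[j] = items[j - 1]
        items[low] = val
    return items
# e.g. _binary_insertion_sort_sketch([5, 2, 4, 1]) == [1, 2, 4, 5]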
if __name__ == "__main__":
__lowerCamelCase : Dict = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : List[str] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 18 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained("google/mt5-small" )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer("Hello there",return_tensors="np" ).input_ids
SCREAMING_SNAKE_CASE_ : str = tokenizer("Hi I am",return_tensors="np" ).input_ids
SCREAMING_SNAKE_CASE_ : List[str] = shift_tokens_right(_A,model.config.pad_token_id,model.config.decoder_start_token_id )
SCREAMING_SNAKE_CASE_ : str = model(_A,decoder_input_ids=_A ).logits
SCREAMING_SNAKE_CASE_ : Dict = optax.softmax_cross_entropy(_A,onehot(_A,logits.shape[-1] ) ).mean()
SCREAMING_SNAKE_CASE_ : Any = -(labels.shape[-1] * loss.item())
SCREAMING_SNAKE_CASE_ : List[str] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
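# Editor's note (hedged): the check above reconstructs a total log-likelihood,
# negating the mean per-token cross-entropy times the target length, and
# compares it against a reference score from the original Mesh-TensorFlow
# implementation (hence the "mtf" in the variable name).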
| 18 | from collections.abc import Sequence
from queue import Queue
class a__ :
def __init__( self : int,_A : List[Any],_A : Optional[Any],_A : Optional[int],_A : int=None,_A : List[str]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = start
SCREAMING_SNAKE_CASE_ : List[str] = end
SCREAMING_SNAKE_CASE_ : Tuple = val
SCREAMING_SNAKE_CASE_ : List[str] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Optional[int] = left
SCREAMING_SNAKE_CASE_ : str = right
def __repr__( self : Tuple ):
"""simple docstring"""
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class a__ :
def __init__( self : Any,_A : Sequence,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = collection
SCREAMING_SNAKE_CASE_ : Optional[int] = function
if self.collection:
SCREAMING_SNAKE_CASE_ : List[str] = self._build_tree(0,len(_A ) - 1 )
def __UpperCamelCase ( self : int,_A : Any,_A : List[Any] ):
"""simple docstring"""
self._update_tree(self.root,_A,_A )
def __UpperCamelCase ( self : str,_A : Any,_A : List[Any] ):
"""simple docstring"""
return self._query_range(self.root,_A,_A )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : int ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(_A,_A,self.collection[start] )
SCREAMING_SNAKE_CASE_ : List[Any] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._build_tree(_A,_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._build_tree(mid + 1,_A )
return SegmentTreeNode(_A,_A,self.fn(left.val,right.val ),_A,_A )
def __UpperCamelCase ( self : int,_A : int,_A : Tuple,_A : Dict ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val
return
if i <= node.mid:
self._update_tree(node.left,_A,_A )
else:
self._update_tree(node.right,_A,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.fn(node.left.val,node.right.val )
def __UpperCamelCase ( self : str,_A : List[str],_A : Optional[int],_A : Optional[Any] ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left,_A,_A )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left,_A,node.mid ),self._query_range(node.right,node.mid + 1,_A ),)
else:
# range in right child tree
return self._query_range(node.right,_A,_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ : int = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
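# Editor's note: _query_range above splits an arbitrary [i, j] query at each
# node's midpoint, so both point updates and range queries run in O(log n);
# traverse() yields the nodes in breadth-first order for the demo below.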
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowerCamelCase : int = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 18 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class a__ ( A__ ):
A = 'realm'
def __init__( self : Optional[Any],_A : int=3_0522,_A : Union[str, Any]=768,_A : Dict=128,_A : Optional[int]=12,_A : List[str]=12,_A : Optional[int]=8,_A : Tuple=3072,_A : Optional[int]="gelu_new",_A : Tuple=0.1,_A : Optional[int]=0.1,_A : Optional[int]=512,_A : Optional[int]=2,_A : Any=0.02,_A : Tuple=1E-12,_A : Union[str, Any]=256,_A : int=10,_A : Tuple=1E-3,_A : Tuple=5,_A : List[str]=320,_A : List[Any]=1335_3718,_A : Optional[Any]=5000,_A : List[Any]=1,_A : int=0,_A : str=2,**_A : Optional[Any],):
"""simple docstring"""
super().__init__(pad_token_id=_A,bos_token_id=_A,eos_token_id=_A,**_A )
# Common config
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Any = retriever_proj_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : str = num_candidates
SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = initializer_range
SCREAMING_SNAKE_CASE_ : Dict = type_vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_norm_eps
# Reader config
SCREAMING_SNAKE_CASE_ : Optional[Any] = span_hidden_size
SCREAMING_SNAKE_CASE_ : Any = max_span_width
SCREAMING_SNAKE_CASE_ : int = reader_layer_norm_eps
SCREAMING_SNAKE_CASE_ : Dict = reader_beam_size
SCREAMING_SNAKE_CASE_ : Any = reader_seq_len
# Retrieval config
SCREAMING_SNAKE_CASE_ : int = num_block_records
SCREAMING_SNAKE_CASE_ : Tuple = searcher_beam_size
| 18 | def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
while b:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = b, a % b
return a
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(lowerCAmelCase , a % b )
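# Editor's sketch (self-contained; both functions above are the Euclidean
# algorithm with obfuscated names): gcd(48, 18) steps through
# (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) and returns 6.
def _euclidean_gcd_sketch(a: int, b: int) -> int:
    while b:
        a, b = b, a % b  # replace (a, b) with (b, a mod b) until b reaches 0
    return a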
def _snake_case ( ):
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 18 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class a__ ( A__ ):
A = 'linear'
A = 'cosine'
A = 'cosine_with_restarts'
A = 'polynomial'
A = 'constant'
A = 'constant_with_warmup'
A = 'piecewise_constant'
def _snake_case ( lowerCAmelCase : Optimizer , lowerCAmelCase : int = -1 ):
"""simple docstring"""
return LambdaLR(lowerCAmelCase , lambda lowerCAmelCase : 1 , last_epoch=lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Optimizer , lowerCAmelCase : int , lowerCAmelCase : int = -1 ):
"""simple docstring"""
def lr_lambda(lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase ) / float(max(1.0 , lowerCAmelCase ) )
return 1.0
return LambdaLR(lowerCAmelCase , lowerCAmelCase , last_epoch=lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Optimizer , lowerCAmelCase : str , lowerCAmelCase : int = -1 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = {}
SCREAMING_SNAKE_CASE_ : Optional[int] = step_rules.split("," )
for rule_str in rule_list[:-1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = rule_str.split(":" )
SCREAMING_SNAKE_CASE_ : Any = int(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = float(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = value
SCREAMING_SNAKE_CASE_ : List[str] = float(rule_list[-1] )
def create_rules_function(lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] ):
def rule_func(lowerCAmelCase : int ) -> float:
SCREAMING_SNAKE_CASE_ : List[str] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
SCREAMING_SNAKE_CASE_ : Optional[Any] = create_rules_function(lowerCAmelCase , lowerCAmelCase )
return LambdaLR(lowerCAmelCase , lowerCAmelCase , last_epoch=lowerCAmelCase )
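# Editor's note on the step_rules format parsed above: the string is split on
# commas into "value:step" pairs plus a trailing default, so
# "1:10,0.1:20,0.01:30,0.005" keeps the LR multiplier at 1 before step 10,
# 0.1 before step 20, 0.01 before step 30, and 0.005 from step 30 onwards.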
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : Any=-1 ):
"""simple docstring"""
def lr_lambda(lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase ) / float(max(1 , lowerCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Optimizer , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : float = 0.5 , lowerCAmelCase : int = -1 ):
"""simple docstring"""
def lr_lambda(lowerCAmelCase : Dict ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase ) / float(max(1 , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : Any = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCAmelCase ) * 2.0 * progress )) )
return LambdaLR(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Optimizer , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 1 , lowerCAmelCase : int = -1 ):
"""simple docstring"""
def lr_lambda(lowerCAmelCase : Tuple ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase ) / float(max(1 , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : str=1E-7 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Any=-1 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = optimizer.defaults["lr"]
if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
def lr_lambda(lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(lowerCAmelCase ) / float(max(1 , lowerCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
SCREAMING_SNAKE_CASE_ : List[str] = lr_init - lr_end
SCREAMING_SNAKE_CASE_ : Any = num_training_steps - num_warmup_steps
SCREAMING_SNAKE_CASE_ : str = 1 - (current_step - num_warmup_steps) / decay_steps
SCREAMING_SNAKE_CASE_ : List[str] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
__lowerCamelCase : Tuple = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def _snake_case ( lowerCAmelCase : Union[str, SchedulerType] , lowerCAmelCase : Optimizer , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : int = 1 , lowerCAmelCase : float = 1.0 , lowerCAmelCase : int = -1 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = SchedulerType(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCAmelCase , last_epoch=lowerCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCAmelCase , step_rules=lowerCAmelCase , last_epoch=lowerCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCAmelCase , num_warmup_steps=lowerCAmelCase , last_epoch=lowerCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCAmelCase , num_warmup_steps=lowerCAmelCase , num_training_steps=lowerCAmelCase , num_cycles=lowerCAmelCase , last_epoch=lowerCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCAmelCase , num_warmup_steps=lowerCAmelCase , num_training_steps=lowerCAmelCase , power=lowerCAmelCase , last_epoch=lowerCAmelCase , )
return schedule_func(
lowerCAmelCase , num_warmup_steps=lowerCAmelCase , num_training_steps=lowerCAmelCase , last_epoch=lowerCAmelCase )
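# Editor's sketch of the dispatcher above (using the original keyword names;
# optimizer is any torch.optim.Optimizer):
#   scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=500,
#                             num_training_steps=10_000)
# "constant" and "piecewise_constant" return early, so they skip the
# num_warmup_steps / num_training_steps validation branches.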
| 18 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Dict = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | 1 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__lowerCamelCase : Union[str, Any] = TypeVar('''T''')
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
return (position - 1) // 2
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
return (2 * position) + 1
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
return (2 * position) + 2
class a__ ( Generic[T] ):
def __init__( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : list[tuple[T, int]] = []
SCREAMING_SNAKE_CASE_ : dict[T, int] = {}
SCREAMING_SNAKE_CASE_ : int = 0
def __len__( self : List[str] ):
"""simple docstring"""
return self.elements
def __repr__( self : Dict ):
"""simple docstring"""
return str(self.heap )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
return self.elements == 0
def __UpperCamelCase ( self : str,_A : T,_A : int ):
"""simple docstring"""
self.heap.append((elem, weight) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.elements
self.elements += 1
self._bubble_up(_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0,self.elements - 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.heap[0]
self._bubble_down(_A )
return elem
def __UpperCamelCase ( self : Union[str, Any],_A : T,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.position_map[elem]
SCREAMING_SNAKE_CASE_ : int = (elem, weight)
if position > 0:
SCREAMING_SNAKE_CASE_ : int = get_parent_position(_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_A )
else:
self._bubble_down(_A )
else:
self._bubble_down(_A )
def __UpperCamelCase ( self : List[Any],_A : T ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.position_map[elem]
if curr_pos == 0:
return None
SCREAMING_SNAKE_CASE_ : Tuple = get_parent_position(_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.heap[curr_pos]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_A,_A )
return self._bubble_up(_A )
return None
def __UpperCamelCase ( self : Optional[Any],_A : T ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.position_map[elem]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.heap[curr_pos]
SCREAMING_SNAKE_CASE_ : Optional[int] = get_child_left_position(_A )
SCREAMING_SNAKE_CASE_ : int = get_child_right_position(_A )
if child_left_position < self.elements and child_right_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.heap[child_left_position]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_A,_A )
return self._bubble_down(_A )
if child_left_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_A,_A )
return self._bubble_down(_A )
else:
return None
if child_right_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_A,_A )
return self._bubble_down(_A )
return None
def __UpperCamelCase ( self : Optional[int],_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.heap[nodea_pos][0]
SCREAMING_SNAKE_CASE_ : List[Any] = self.heap[nodea_pos][0]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
SCREAMING_SNAKE_CASE_ : Tuple = nodea_pos
SCREAMING_SNAKE_CASE_ : List[Any] = nodea_pos
class a__ ( Generic[T] ):
def __init__( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : dict[T, dict[T, int]] = {}
SCREAMING_SNAKE_CASE_ : int = 0
def __repr__( self : Optional[Any] ):
"""simple docstring"""
return str(self.connections )
def __len__( self : str ):
"""simple docstring"""
return self.nodes
def __UpperCamelCase ( self : Tuple,_A : T ):
"""simple docstring"""
if node not in self.connections:
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
self.nodes += 1
def __UpperCamelCase ( self : Tuple,_A : T,_A : T,_A : int ):
"""simple docstring"""
self.add_node(_A )
self.add_node(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = weight
SCREAMING_SNAKE_CASE_ : Union[str, Any] = weight
def _snake_case ( lowerCAmelCase : GraphUndirectedWeighted[T] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : dict[T, int] = {node: maxsize for node in graph.connections}
SCREAMING_SNAKE_CASE_ : dict[T, T | None] = {node: None for node in graph.connections}
SCREAMING_SNAKE_CASE_ : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCAmelCase , lowerCAmelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
SCREAMING_SNAKE_CASE_ : Optional[Any] = priority_queue.extract_min()
SCREAMING_SNAKE_CASE_ : Dict = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
SCREAMING_SNAKE_CASE_ : int = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase , dist[neighbour] )
SCREAMING_SNAKE_CASE_ : Optional[int] = node
# running prim's algorithm
while not priority_queue.is_empty():
SCREAMING_SNAKE_CASE_ : Any = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
SCREAMING_SNAKE_CASE_ : Dict = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase , dist[neighbour] )
SCREAMING_SNAKE_CASE_ : int = node
return dist, parent
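# Editor's note: the function above is Prim's algorithm. dist[v] holds the
# cheapest known edge weight connecting v to the growing tree and parent[v]
# records that edge; the position map inside MinPriorityQueue makes update_key
# a true O(log n) decrease-key, giving O((V + E) log V) overall.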
| 18 | import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Path , lowerCAmelCase : str = None , lowerCAmelCase : str = None , lowerCAmelCase : str = None , ):
"""simple docstring"""
if config_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = question_encoder_name_or_path
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
SCREAMING_SNAKE_CASE_ : List[Any] = RagConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = gen_config
SCREAMING_SNAKE_CASE_ : Optional[Any] = question_encoder_config
SCREAMING_SNAKE_CASE_ : Dict = model_class.from_pretrained_question_encoder_generator(
lowerCAmelCase , lowerCAmelCase , config=lowerCAmelCase )
rag_model.save_pretrained(lowerCAmelCase )
# Sanity check.
model_class.from_pretrained(lowerCAmelCase )
# Save tokenizers.
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
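# Example invocation (editor's sketch; the script filename and model
# identifiers are illustrative, the flags match the argparse setup below):
#   python consolidate_rag_checkpoint.py --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated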
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 18 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a__ :
def __init__( self : str,_A : str,_A : str=2,_A : List[str]=32,_A : List[Any]=16,_A : Any=3,_A : str=True,_A : Union[str, Any]=True,_A : Union[str, Any]=32,_A : Optional[Any]=4,_A : Any=[0, 1, 2, 3],_A : Any=4,_A : Tuple=37,_A : List[str]="gelu",_A : Optional[int]=0.1,_A : str=0.1,_A : int=0.02,_A : Optional[int]=3,_A : str=[1, 384, 24, 24],_A : str=True,_A : Tuple=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : Any = image_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE_ : List[str] = num_channels
SCREAMING_SNAKE_CASE_ : List[str] = is_training
SCREAMING_SNAKE_CASE_ : Any = use_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = backbone_out_indices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = initializer_range
SCREAMING_SNAKE_CASE_ : int = num_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = backbone_featmap_shape
SCREAMING_SNAKE_CASE_ : str = scope
SCREAMING_SNAKE_CASE_ : Any = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_patches + 1
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size],self.num_labels )
SCREAMING_SNAKE_CASE_ : str = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,backbone_out_indices=self.backbone_out_indices,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=_A,initializer_range=self.initializer_range,is_hybrid=self.is_hybrid,backbone_config=_A,backbone_featmap_shape=self.backbone_featmap_shape,)
def __UpperCamelCase ( self : Union[str, Any],_A : Optional[int],_A : Union[str, Any],_A : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = DPTModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : List[Any],_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[Any] = DPTForDepthEstimation(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )
self.parent.assertEqual(result.predicted_depth.shape,(self.batch_size, self.image_size, self.image_size) )
def __UpperCamelCase ( self : List[str],_A : Tuple,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.num_labels
SCREAMING_SNAKE_CASE_ : Dict = DPTForSemanticSegmentation(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,labels=_A )
self.parent.assertEqual(
result.logits.shape,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = config_and_inputs
SCREAMING_SNAKE_CASE_ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
A = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DPTModelTester(self )
SCREAMING_SNAKE_CASE_ : int = ConfigTester(self,config_class=_A,has_text_modality=_A,hidden_size=37 )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
    def test_model_common_attributes( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear ) )
    def test_forward_signature( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_depth_estimation( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training( self ):
        """simple docstring"""
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        """simple docstring"""
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(),[0.0, 1.0],msg=F'Parameter {name} of model {model_class} seems not properly initialized',)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_raise_readout_type( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError ):
            model = DPTForDepthEstimation(config )
def prepare_img():
    """Load the COCO sample image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
@slow
class a__ ( unittest.TestCase ):
    def test_inference_depth_estimation( self ):
        """simple docstring"""
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(torch_device )
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
            predicted_depth = outputs.predicted_depth
        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape, expected_shape )
        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1E-4 ) )
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
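# The tests below check that the framework-agnostic tensor helpers (transpose,
# reshape, squeeze, expand_dims, flatten_dict) agree with their numpy
# equivalents across PyTorch, TensorFlow and JAX inputs.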
class GenericTester( unittest.TestCase ):
    def test_flatten_dict( self ):
        """simple docstring"""
        input_dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
        expected_dict = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
        self.assertEqual(flatten_dict(input_dict ), expected_dict )
    def test_transpose_numpy( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        self.assertTrue(np.allclose(transpose(x ), x.transpose() ) )
        x = np.random.randn(3, 4, 5 )
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0) ), x.transpose((1, 2, 0) ) ) )
@require_torch
    def test_transpose_torch( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x ), transpose(t ).numpy() ) )
        x = np.random.randn(3, 4, 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0) ), transpose(t, axes=(1, 2, 0) ).numpy() ) )
@require_tf
    def test_transpose_tf( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x ), transpose(t ).numpy() ) )
        x = np.random.randn(3, 4, 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0) ), transpose(t, axes=(1, 2, 0) ).numpy() ) )
@require_flax
    def test_transpose_flax( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x ), np.asarray(transpose(t ) ) ) )
        x = np.random.randn(3, 4, 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0) ), np.asarray(transpose(t, axes=(1, 2, 0) ) ) ) )
    def test_reshape_numpy( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        self.assertTrue(np.allclose(reshape(x, (4, 3) ), np.reshape(x, (4, 3) ) ) )
        x = np.random.randn(3, 4, 5 )
        self.assertTrue(np.allclose(reshape(x, (12, 5) ), np.reshape(x, (12, 5) ) ) )
@require_torch
    def test_reshape_torch( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x, (4, 3) ), reshape(t, (4, 3) ).numpy() ) )
        x = np.random.randn(3, 4, 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(reshape(x, (12, 5) ), reshape(t, (12, 5) ).numpy() ) )
@require_tf
    def test_reshape_tf( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x, (4, 3) ), reshape(t, (4, 3) ).numpy() ) )
        x = np.random.randn(3, 4, 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(reshape(x, (12, 5) ), reshape(t, (12, 5) ).numpy() ) )
@require_flax
    def test_reshape_flax( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x, (4, 3) ), np.asarray(reshape(t, (4, 3) ) ) ) )
        x = np.random.randn(3, 4, 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(reshape(x, (12, 5) ), np.asarray(reshape(t, (12, 5) ) ) ) )
    def test_squeeze_numpy( self ):
        """simple docstring"""
        x = np.random.randn(1, 3, 4 )
        self.assertTrue(np.allclose(squeeze(x ), np.squeeze(x ) ) )
        x = np.random.randn(1, 4, 1, 5 )
        self.assertTrue(np.allclose(squeeze(x, axis=2 ), np.squeeze(x, axis=2 ) ) )
@require_torch
    def test_squeeze_torch( self ):
        """simple docstring"""
        x = np.random.randn(1, 3, 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x ), squeeze(t ).numpy() ) )
        x = np.random.randn(1, 4, 1, 5 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(squeeze(x, axis=2 ), squeeze(t, axis=2 ).numpy() ) )
@require_tf
    def test_squeeze_tf( self ):
        """simple docstring"""
        x = np.random.randn(1, 3, 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x ), squeeze(t ).numpy() ) )
        x = np.random.randn(1, 4, 1, 5 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(squeeze(x, axis=2 ), squeeze(t, axis=2 ).numpy() ) )
@require_flax
    def test_squeeze_flax( self ):
        """simple docstring"""
        x = np.random.randn(1, 3, 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x ), np.asarray(squeeze(t ) ) ) )
        x = np.random.randn(1, 4, 1, 5 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(squeeze(x, axis=2 ), np.asarray(squeeze(t, axis=2 ) ) ) )
    def test_expand_dims_numpy( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        self.assertTrue(np.allclose(expand_dims(x, axis=1 ), np.expand_dims(x, axis=1 ) ) )
@require_torch
    def test_expand_dims_torch( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        t = torch.tensor(x )
        self.assertTrue(np.allclose(expand_dims(x, axis=1 ), expand_dims(t, axis=1 ).numpy() ) )
@require_tf
    def test_expand_dims_tf( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        t = tf.constant(x )
        self.assertTrue(np.allclose(expand_dims(x, axis=1 ), expand_dims(t, axis=1 ).numpy() ) )
@require_flax
    def test_expand_dims_flax( self ):
        """simple docstring"""
        x = np.random.randn(3, 4 )
        t = jnp.array(x )
        self.assertTrue(np.allclose(expand_dims(x, axis=1 ), np.asarray(expand_dims(t, axis=1 ) ) ) )
from collections import defaultdict
from math import ceil, sqrt
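# Project Euler 174: a hollow square lamina with outer width w and hole width h
# uses w*w - h*h tiles; w and h must share parity and satisfy h <= w - 2.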
def solution(t_limit: int = 1_000_000 , n_limit: int = 10 ) -> int:
    """Count tile totals t <= t_limit that can form between 1 and n_limit
    distinct hollow square laminae."""
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f'''{solution() = }''')
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
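# Byte-level BPE vocabulary helpers: every UTF-8 byte is mapped to a printable
# unicode character, so the tokenizer can encode any string without <unk> tokens.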
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 byte values to printable unicode strings."""
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class LongformerTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        """Build the byte-level BPE tokenizer from a vocab and a merges file."""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False ) if isinstance(bos_token, str ) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False ) if isinstance(eos_token, str ) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False ) if isinstance(sep_token, str ) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False ) if isinstance(cls_token, str ) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False ) if isinstance(unk_token, str ) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False ) if isinstance(pad_token, str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
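    # bpe() below greedily applies the lowest-ranked merge from self.bpe_ranks until
    # no adjacent pair in the word remains mergeable; results are memoized in self.cache.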
@property
    def vocab_size( self ):
        """Return the size of the base vocabulary."""
        return len(self.encoder )
    def get_vocab( self ):
        """Return the full vocabulary, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder )
    def bpe( self, token ):
        """Merge the token's symbols according to the learned BPE ranks."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair : self.bpe_ranks.get(pair, float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self, text ):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self, token: str ):
        """Convert a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self, index: int ):
        """Convert an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index )
    def convert_tokens_to_string( self, tokens ):
        """Convert a sequence of tokens back into a single decoded string."""
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors )
        return text
    def save_vocabulary( self, save_directory: str, filename_prefix: Optional[str] = None ):
        """Write the vocab json and the BPE merges file to save_directory."""
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file, "w", encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file, "w", encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        """Add <s> and </s> around one sequence or a pair of sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ):
        """Return the all-zero token type ids expected by Longformer."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self, text, is_split_into_words=False, **kwargs ):
        """Optionally prepend a space so the first word is merged like any other."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = GPTJConfig(
            vocab_size=self.vocab_size,n_embd=self.hidden_size,n_layer=self.num_hidden_layers,n_head=self.num_attention_heads,n_positions=self.max_position_embeddings,use_cache=False,bos_token_id=self.bos_token_id,eos_token_id=self.eos_token_id,pad_token_id=self.pad_token_id,rotary_dim=self.rotary_dim,)
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
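    # The two check_use_cache_* helpers below verify that decoding one token at a
    # time with a pre-allocated key/value cache matches a single full forward pass.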
    def check_use_cache_forward( self, model_class_name, config, input_ids, attention_mask ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length )
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4" )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )
        outputs = model(input_ids )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3, msg=F'Max diff is {diff}' )
    def check_use_cache_forward_with_attn_mask( self, model_class_name, config, input_ids, attention_mask ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )], axis=-1, )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4" )
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )
        outputs = model(input_ids, attention_mask=attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3, msg=F'Max diff is {diff}' )
@require_flax
class FlaxGPTJModelTest( FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxGPTJModelTester(self )
    def test_use_cache_forward( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask )
    def test_use_cache_forward_with_attn_mask( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask )
@tooslow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = GPTaTokenizer.from_pretrained("gpt2",pad_token="<|endoftext|>",padding_side="left" )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(["Hello this is a long string", "Hey"],return_tensors="np",padding=_A,truncation=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Any = model.config.eos_token_id
SCREAMING_SNAKE_CASE_ : Dict = jax.jit(model.generate )
SCREAMING_SNAKE_CASE_ : str = jit_generate(
inputs["input_ids"],attention_mask=inputs["attention_mask"],pad_token_id=tokenizer.pad_token_id ).sequences
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.batch_decode(_A,skip_special_tokens=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = [
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(_A,_A )
@is_pt_flax_cross_test
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE_ : Tuple = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE_ : str = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_ : str = getattr(_A,_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = pt_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randint(0,seq_length - 1,size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : List[Any] = pt_model_class(_A ).eval()
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A,dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict(),_A )
SCREAMING_SNAKE_CASE_ : Any = fx_state
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] = pt_model(**_A ).to_tuple()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = fx_model(**_A ).to_tuple()
self.assertEqual(len(_A ),len(_A ),"Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_A,_A ):
self.assert_almost_equals(fx_output[:, -1],pt_output[:, -1].numpy(),4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : int = model_class.from_pretrained(_A,from_pt=_A )
SCREAMING_SNAKE_CASE_ : str = fx_model_loaded(**_A ).to_tuple()
self.assertEqual(
len(_A ),len(_A ),"Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(_A,_A ):
self.assert_almost_equals(fx_output_loaded[:, -1],pt_output[:, -1].numpy(),4E-2 )
@is_pt_flax_cross_test
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
SCREAMING_SNAKE_CASE_ : str = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : Any = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
SCREAMING_SNAKE_CASE_ : Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = pt_model_class(_A ).eval()
SCREAMING_SNAKE_CASE_ : Any = model_class(_A,dtype=jnp.floataa )
SCREAMING_SNAKE_CASE_ : Tuple = load_flax_weights_in_pytorch_model(_A,fx_model.params )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = pt_inputs["input_ids"].shape
SCREAMING_SNAKE_CASE_ : Optional[int] = np.random.randint(0,seq_length - 1,size=(batch_size,) )
for batch_idx, start_index in enumerate(_A ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : Tuple = 1
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : int = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Any = pt_model(**_A ).to_tuple()
SCREAMING_SNAKE_CASE_ : List[str] = fx_model(**_A ).to_tuple()
self.assertEqual(len(_A ),len(_A ),"Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_A,_A ):
self.assert_almost_equals(fx_output[:, -1],pt_output[:, -1].numpy(),4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Tuple = pt_model_class.from_pretrained(_A,from_flax=_A )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] = pt_model_loaded(**_A ).to_tuple()
self.assertEqual(
len(_A ),len(_A ),"Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(_A,_A ):
self.assert_almost_equals(fx_output[:, -1],pt_output[:, -1].numpy(),4E-2 )
@tooslow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=True,)
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = TFConvBertModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
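    # For multiple choice, every input tensor is tiled num_choices times along a
    # new axis so the encoder scores each (context, choice) pair with shared weights.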
    def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1 ), (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1 ), (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1 ), (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(model )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class TFConvBertModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4 )
import sys
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
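# Project Euler 8: find the thirteen adjacent digits in the 1000-digit number
# above that have the greatest product.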
def str_eval(s: str ) -> int:
    """Return the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution(n: str = N ) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    substr = n[:1_3]
    cur_index = 1_3
    while cur_index < len(n ) - 1_3:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 1_3]
            cur_index += 1_3
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
def binary_recursive(decimal: int ) -> str:
    """Return the binary representation of a non-negative integer."""
    decimal = int(decimal )
    if decimal in (0, 1): # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def main(number: str ) -> str:
    """Convert an integer string to binary, raising ValueError on bad input."""
    number = str(number ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return f'{negative}0b{binary_recursive(int(number ) )}'
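# For example, main("-25") returns "-0b11001".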
if __name__ == "__main__":
from doctest import testmod
testmod()
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets(screen_name: str ) -> None:
    """Download a user's recent tweets and write them to a csv file."""
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=2_0_0 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(f'getting tweets before {oldest}' )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=2_0_0 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f'...{len(alltweets )} tweets downloaded so far' )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f'new_{screen_name}_tweets.csv' , "w" ) as f:
        writer = csv.writer(f )
        writer.writerow(["id", "created_at", "text"] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = [
        "ChineseCLIPFeatureExtractor",
        "ChineseCLIPImageProcessor",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or extract features from `audios`, merging the results."""
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
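
# Hypothetical usage sketch (added; the checkpoint name and `waveform` are
# illustrative, not taken from the original file):
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform,
#                      sampling_rate=48_000, return_tensors="pt")
#
# Text-only and audio-only calls are also supported, as handled in __call__.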

import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, block.{x}.layer.{y} corresponds to layers_{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: stacked expert weights are split
    # into per-expert entries under experts/expert_{idx}/ (HF Switch layout).
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                new_expert_key = key.replace("expert/", f"experts/expert_{idx}/")
                s_dict[new_expert_key] = expert_weights[idx]
                print(f"{key} -> {new_expert_key}")
            s_dict.pop(key)
    return s_dict
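
# Added illustration (hand-traced, not from the original script): a raw T5X key
#   "encoder/layers_0/attention/query/kernel"
# is renamed by the function above to
#   "encoder/block/0/layer/0/SelfAttention/q/kernel"
# via the layer renumbering followed by the MOE_LAYER_NAME_MAPPING substitutions.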
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    """Parse a gin config file into a SwitchTransformersConfig."""
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise the PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"
    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        """Serialize this config, including the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(x) using its Taylor series expansion."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
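
# Added sanity check (sketch): compare the Taylor approximation with math.sin
# for a few angles; the 1e-6 tolerance is illustrative only.
if __name__ == "__main__":
    import math

    for _deg in (0.0, 30.0, 45.0, 90.0, 180.0):
        assert abs(sin(_deg) - math.sin(math.radians(_deg))) < 1e-6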
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def get_frameworks_table() -> "pd.DataFrame":
    """Generate a dataframe listing, for each model type, its PT/TF/Flax support and processor class."""
    # Dictionary of model prefix to model type.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Refresh the model class -> (pipeline tag, auto class) table from the auto mappings."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """Update the metadata stored in the `huggingface/transformers-metadata` dataset."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check that every pipeline task is covered by the `PIPELINE_TAGS_AND_AUTO_MODELS` constant."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''')
parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''')
parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''')
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)

from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return num! computed recursively, with results memoised by lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
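
# Added usage sketch: thanks to @lru_cache, repeated calls reuse earlier
# results, so computing factorial(10) after factorial(5) only performs the
# five remaining multiplications.
if __name__ == "__main__":
    assert factorial(5) == 120
    assert factorial(0) == 1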
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to pass to the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the DFS from the root; afterwards `cuts` holds every node whose
    subtree has an even number of vertices (including the root itself)."""
    dfs(1)
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
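    # Added check (sketch): removing edges (1, 3) and (1, 6) splits the sample
    # tree into components of sizes 2, 4 and 4, so the expected answer is 2.
    assert len(cuts) - 1 == 2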
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
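
# Hypothetical usage sketch (added; assumes the surrounding tools framework
# and a raw waveform array `audio`):
#
#   tool = SpeechToTextTool()
#   transcript = tool(audio)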

# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
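
# Added note: each sub-parser registered in main() sets a `func` default, so an
# invocation such as `accelerate env` dispatches to that command's handler via
# args.func(args); a bare `accelerate` prints the help text and exits instead.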
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination x coordinate back to the source image."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination y coordinate back to the source image."""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
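
# Added self-contained check (sketch): resize a synthetic image, avoiding the
# cv2 image-file dependency, to confirm the output geometry:
#
#   tiny = np.zeros((2, 2, 3), np.uint8)
#   scaled = NearestNeighbour(tiny, 4, 4)
#   scaled.process()
#   assert scaled.output.shape == (4, 4, 3)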

import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations from `array` that sum to `target` (naive recursion)."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoised top-down with a dp array."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Same count, computed bottom-up over all sub-targets."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
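    # Added cross-check (sketch): the three implementations above agree; for
    # array [1, 2, 5] and target 5 there are 9 ordered combinations.
    assert combination_sum_iv(n, array, target) == 9
    assert combination_sum_iv_dp_array(n, array, target) == 9
    assert combination_sum_iv_bottom_up(n, array, target) == 9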

import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)

        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
deps = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
| 18 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
# text files are loaded into a single "text" column whose default dtype is "string"
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 18 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
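# lazy import structure: submodule members are registered here and only imported on first access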
__lowerCamelCase : Union[str, Any] = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ['''BeitFeatureExtractor''']
__lowerCamelCase : Union[str, Any] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
__lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = []
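# run the pipeline once per supported Karras scheduler and verify all outputs share one shape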
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# schedulers in the skip list are not supported by this pipeline
continue
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
| 18 | 1 |
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = len(lowerCAmelCase )
for i in range(length - 1 ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = i
for k in range(i + 1 , lowerCAmelCase ):
if collection[k] < collection[least]:
SCREAMING_SNAKE_CASE_ : Optional[int] = k
if least != i:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = (collection[i], collection[least])
return collection
if __name__ == "__main__":
__lowerCamelCase : Tuple = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : Union[str, Any] = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 18 | from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class a__ ( A__ ):
A = 'perceiver'
def __init__( self : List[Any],_A : Tuple=256,_A : str=1280,_A : List[Any]=768,_A : Union[str, Any]=1,_A : Union[str, Any]=26,_A : List[str]=8,_A : List[Any]=8,_A : List[Any]=None,_A : List[Any]=None,_A : Union[str, Any]="kv",_A : Any=1,_A : int=1,_A : Dict="gelu",_A : Any=0.1,_A : int=0.02,_A : int=1E-12,_A : Any=True,_A : Optional[Any]=262,_A : List[Any]=2048,_A : str=56,_A : Optional[int]=[368, 496],_A : Dict=16,_A : Tuple=1920,_A : List[Any]=16,_A : str=[1, 16, 224, 224],**_A : Optional[Any],):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Dict = num_latents
SCREAMING_SNAKE_CASE_ : List[Any] = d_latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = num_blocks
SCREAMING_SNAKE_CASE_ : List[Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE_ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = num_cross_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = qk_channels
SCREAMING_SNAKE_CASE_ : Any = v_channels
SCREAMING_SNAKE_CASE_ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE_ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE_ : Any = cross_attention_widening_factor
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE_ : Dict = image_size
# flow attributes
SCREAMING_SNAKE_CASE_ : List[Any] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE_ : str = num_frames
SCREAMING_SNAKE_CASE_ : Any = audio_samples_per_frame
SCREAMING_SNAKE_CASE_ : Tuple = samples_per_patch
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_shape
class a__ ( A__ ):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If the axis is dynamic (-1), we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If the axis is dynamic (-1), we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If the axis is dynamic (-1), we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 1 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
SCREAMING_SNAKE_CASE_ : Optional[int] = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
SCREAMING_SNAKE_CASE_ : List[str] = f'{src_lang}-{tgt_lang}'
SCREAMING_SNAKE_CASE_ : List[Any] = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(lowerCAmelCase , "README.md" )
print(f'Generating {path}' )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(lowerCAmelCase )
# make sure we are under the root of the project
__lowerCamelCase : Any = Path(__file__).resolve().parent.parent.parent
__lowerCamelCase : Optional[Any] = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : int = model_name.split('''-''')
__lowerCamelCase : str = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 18 | from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a__ ( yaml.SafeLoader ):
def __UpperCamelCase ( self : str,_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = [self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(_A ) if isinstance(_A,_A ) else key for key in keys]
SCREAMING_SNAKE_CASE_ : Optional[int] = Counter(_A )
SCREAMING_SNAKE_CASE_ : Tuple = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'Got duplicate yaml keys: {duplicate_keys}' )
def __UpperCamelCase ( self : Tuple,_A : Dict,_A : List[Any]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = super().construct_mapping(_A,deep=_A )
self._check_no_duplicates_on_constructed_node(_A )
return mapping
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_ : List[Any] = full_content[1:].index("---" ) + 1
SCREAMING_SNAKE_CASE_ : int = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCAmelCase )
class a__ ( A__ ):
# class attributes
A = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def __UpperCamelCase ( cls : Any,_A : Path ):
"""simple docstring"""
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_A )
else:
return cls()
def __UpperCamelCase ( self : Dict,_A : Path ):
"""simple docstring"""
if path.exists():
with open(_A,encoding="utf-8" ) as readme_file:
SCREAMING_SNAKE_CASE_ : int = readme_file.read()
else:
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : int = self._to_readme(_A )
with open(_A,"w",encoding="utf-8" ) as readme_file:
readme_file.write(_A )
def __UpperCamelCase ( self : Optional[int],_A : Optional[str] = None ):
"""simple docstring"""
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = _split_yaml_from_readme(_A )
SCREAMING_SNAKE_CASE_ : Tuple = "---\n" + self.to_yaml_string() + "---\n" + content
else:
SCREAMING_SNAKE_CASE_ : Dict = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def __UpperCamelCase ( cls : Dict,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = yaml.load(_A,Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_ : Any = {
(key.replace("-","_" ) if key.replace("-","_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace("_","-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
},sort_keys=_A,allow_unicode=_A,encoding="utf-8",).decode("utf-8" )
__lowerCamelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__lowerCamelCase : List[Any] = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__lowerCamelCase : Dict = ap.parse_args()
__lowerCamelCase : List[Any] = Path(args.readme_filepath)
__lowerCamelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 18 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : List[str] = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class a__ ( A__ ):
A = 'rwkv'
A = {'max_position_embeddings': 'context_length'}
def __init__( self : List[Any],_A : int=5_0277,_A : Any=1024,_A : List[str]=4096,_A : Union[str, Any]=32,_A : List[str]=None,_A : List[str]=None,_A : Optional[int]=1E-5,_A : Any=0,_A : Any=0,_A : Any=6,_A : Any=False,_A : Any=True,**_A : Optional[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = vocab_size
SCREAMING_SNAKE_CASE_ : List[Any] = context_length
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : str = attention_hidden_size if attention_hidden_size is not None else hidden_size
SCREAMING_SNAKE_CASE_ : str = intermediate_size if intermediate_size is not None else 4 * hidden_size
SCREAMING_SNAKE_CASE_ : Tuple = layer_norm_epsilon
SCREAMING_SNAKE_CASE_ : Optional[Any] = rescale_every
SCREAMING_SNAKE_CASE_ : str = use_cache
SCREAMING_SNAKE_CASE_ : Any = bos_token_id
SCREAMING_SNAKE_CASE_ : int = eos_token_id
super().__init__(
tie_word_embeddings=_A,bos_token_id=_A,eos_token_id=_A,**_A )
| 18 | from __future__ import annotations
from math import pi, sqrt
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if inductance <= 0:
raise ValueError("Inductance cannot be 0 or negative" )
elif capacitance <= 0:
raise ValueError("Capacitance cannot be 0 or negative" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=A__ )
class a__ ( A__ ):
A = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
A = Features({'image': Image()} )
A = Features({'labels': ClassLabel} )
A = "image"
A = "labels"
def __UpperCamelCase ( self : Any,_A : Optional[Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column],_A ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = copy.deepcopy(self )
SCREAMING_SNAKE_CASE_ : Tuple = self.label_schema.copy()
SCREAMING_SNAKE_CASE_ : int = features[self.label_column]
SCREAMING_SNAKE_CASE_ : List[Any] = label_schema
return task_template
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
| 18 | def _snake_case ( lowerCAmelCase : list ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = len(lowerCAmelCase )
for i in range(1 , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = collection[i]
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Tuple = i - 1
while low <= high:
SCREAMING_SNAKE_CASE_ : int = (low + high) // 2
if val < collection[mid]:
SCREAMING_SNAKE_CASE_ : Optional[Any] = mid - 1
else:
SCREAMING_SNAKE_CASE_ : Tuple = mid + 1
for j in range(lowerCAmelCase , lowerCAmelCase , -1 ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = collection[j - 1]
SCREAMING_SNAKE_CASE_ : int = val
return collection
if __name__ == "__main__":
__lowerCamelCase : Dict = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : List[str] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 18 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCAmelCase )
env_command_parser(subparsers=lowerCAmelCase )
launch_command_parser(subparsers=lowerCAmelCase )
tpu_command_parser(subparsers=lowerCAmelCase )
test_command_parser(subparsers=lowerCAmelCase )
# Let's go
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
if not hasattr(lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase )
if __name__ == "__main__":
main()
| 18 | from collections.abc import Sequence
from queue import Queue
class a__ :
def __init__( self : int,_A : List[Any],_A : Optional[Any],_A : Optional[int],_A : int=None,_A : List[str]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = start
SCREAMING_SNAKE_CASE_ : List[str] = end
SCREAMING_SNAKE_CASE_ : Tuple = val
SCREAMING_SNAKE_CASE_ : List[str] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Optional[int] = left
SCREAMING_SNAKE_CASE_ : str = right
def __repr__( self : Tuple ):
"""simple docstring"""
return F'SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'
class a__ :
def __init__( self : Any,_A : Sequence,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = collection
SCREAMING_SNAKE_CASE_ : Optional[int] = function
if self.collection:
SCREAMING_SNAKE_CASE_ : List[str] = self._build_tree(0,len(_A ) - 1 )
def __UpperCamelCase ( self : int,_A : Any,_A : List[Any] ):
"""simple docstring"""
self._update_tree(self.root,_A,_A )
def __UpperCamelCase ( self : str,_A : Any,_A : List[Any] ):
"""simple docstring"""
return self._query_range(self.root,_A,_A )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : int ):
"""simple docstring"""
if start == end:
return SegmentTreeNode(_A,_A,self.collection[start] )
SCREAMING_SNAKE_CASE_ : List[Any] = (start + end) // 2
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._build_tree(_A,_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._build_tree(mid + 1,_A )
return SegmentTreeNode(_A,_A,self.fn(left.val,right.val ),_A,_A )
def __UpperCamelCase ( self : int,_A : int,_A : Tuple,_A : Dict ):
"""simple docstring"""
if node.start == i and node.end == i:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val
return
if i <= node.mid:
self._update_tree(node.left,_A,_A )
else:
self._update_tree(node.right,_A,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.fn(node.left.val,node.right.val )
def __UpperCamelCase ( self : str,_A : List[str],_A : Optional[int],_A : Optional[Any] ):
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left,_A,_A )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left,_A,node.mid ),self._query_range(node.right,node.mid + 1,_A ),)
else:
# range in right child tree
return self._query_range(node.right,_A,_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self.root is not None:
SCREAMING_SNAKE_CASE_ : int = Queue()
queue.put(self.root )
while not queue.empty():
SCREAMING_SNAKE_CASE_ : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowerCamelCase : int = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 18 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ : Optional[int] = BlipImageProcessor()
SCREAMING_SNAKE_CASE_ : Tuple = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BlipProcessor(_A,_A )
processor.save_pretrained(self.tmpdirname )
def __UpperCamelCase ( self : Dict,**_A : str ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname,**_A ).tokenizer
def __UpperCamelCase ( self : Any,**_A : int ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname,**_A ).image_processor
def __UpperCamelCase ( self : int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
SCREAMING_SNAKE_CASE_ : List[Any] = [Image.fromarray(np.moveaxis(_A,0,-1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = BlipProcessor(tokenizer=self.get_tokenizer(),image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)",eos_token="(EOS)" )
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor(do_normalize=_A,padding_value=1.0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BlipProcessor.from_pretrained(
self.tmpdirname,bos_token="(BOS)",eos_token="(EOS)",do_normalize=_A,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(),tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer,_A )
self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor,_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[Any] = BlipProcessor(tokenizer=_A,image_processor=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processor(_A,return_tensors="np" )
SCREAMING_SNAKE_CASE_ : Tuple = processor(images=_A,return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : List[str] = BlipProcessor(tokenizer=_A,image_processor=_A )
SCREAMING_SNAKE_CASE_ : List[str] = "lower newer"
SCREAMING_SNAKE_CASE_ : int = processor(text=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer(_A,return_token_type_ids=_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key],encoded_processor[key] )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : str = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = BlipProcessor(tokenizer=_A,image_processor=_A )
SCREAMING_SNAKE_CASE_ : Tuple = "lower newer"
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Any = processor(text=_A,images=_A )
self.assertListEqual(list(inputs.keys() ),["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : str = BlipProcessor(tokenizer=_A,image_processor=_A )
SCREAMING_SNAKE_CASE_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE_ : List[str] = processor.batch_decode(_A )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.batch_decode(_A )
self.assertListEqual(_A,_A )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : int = BlipProcessor(tokenizer=_A,image_processor=_A )
SCREAMING_SNAKE_CASE_ : str = "lower newer"
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor(text=_A,images=_A )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ),["pixel_values", "input_ids", "attention_mask"] )
| 18 | def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
while b:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = b, a % b
return a
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int ):
"""simple docstring"""
return a if b == 0 else euclidean_gcd_recursive(lowerCAmelCase , a % b )
def _snake_case ( ):
"""simple docstring"""
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 18 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a__ :
def __init__( self : Any,_A : Union[str, Any],_A : Tuple=13,_A : List[str]=30,_A : List[Any]=2,_A : Optional[int]=3,_A : str=True,_A : Optional[Any]=True,_A : Optional[int]=32,_A : int=2,_A : List[Any]=4,_A : str=37,_A : Optional[Any]="gelu",_A : List[str]=0.1,_A : Dict=0.1,_A : int=10,_A : List[Any]=0.02,_A : int=3,_A : Optional[Any]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = parent
SCREAMING_SNAKE_CASE_ : str = batch_size
SCREAMING_SNAKE_CASE_ : Any = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE_ : Optional[int] = num_channels
SCREAMING_SNAKE_CASE_ : Any = is_training
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Dict = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : str = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[int] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ : str = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ : int = num_patches + 1
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=_A,initializer_range=self.initializer_range,)
def __UpperCamelCase ( self : Union[str, Any],_A : Optional[int],_A : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFViTModel(config=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A,training=_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image of a different size than the one specified in the config.
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_size // 2
SCREAMING_SNAKE_CASE_ : Tuple = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,interpolate_pos_encoding=_A,training=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Optional[Any],_A : str,_A : Optional[int],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ : List[str] = TFViTForImageClassification(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,labels=_A,training=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# Test with an image of a different size than the one specified in the config.
SCREAMING_SNAKE_CASE_ : List[str] = self.image_size // 2
SCREAMING_SNAKE_CASE_ : Tuple = pixel_values[:, :, :image_size, :image_size]
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A,interpolate_pos_encoding=_A,training=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_ : List[str] = 1
SCREAMING_SNAKE_CASE_ : List[Any] = TFViTForImageClassification(_A )
SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE_ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
A = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = TFViTModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,has_text_modality=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
pass
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings(),(tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A,tf.keras.layers.Layer ) )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Optional[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1],_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(_A )
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Tuple = prepare_img()
SCREAMING_SNAKE_CASE_ : Dict = image_processor(images=_A,return_tensors="tf" )
# forward pass
SCREAMING_SNAKE_CASE_ : Dict = model(**_A )
# verify the logits
SCREAMING_SNAKE_CASE_ : int = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape,_A )
SCREAMING_SNAKE_CASE_ : int = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3],_A,atol=1E-4 )
| 18 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
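# Lazy-import pattern: the structure below maps each submodule to its public symbols so that
# heavy backends (torch / TF) are only imported when one of these names is first accessed.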
__lowerCamelCase : Dict = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 18 | 1 |
import copy
import random
from transformers import CLIPTokenizer
class a__ ( A__ ):
def __init__( self : Tuple,*_A : int,**_A : str ):
"""simple docstring"""
super().__init__(*_A,**_A )
SCREAMING_SNAKE_CASE_ : Any = {}
def __UpperCamelCase ( self : int,_A : Tuple,*_A : Optional[int],**_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = super().add_tokens(_A,*_A,**_A )
if num_added_tokens == 0:
raise ValueError(
F'The tokenizer already contains the token {placeholder_token}. Please pass a different'
" `placeholder_token` that is not already in the tokenizer." )
def __UpperCamelCase ( self : int,_A : Any,*_A : Union[str, Any],_A : Union[str, Any]=1,**_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
if num_vec_per_token == 1:
self.try_adding_tokens(_A,*_A,**_A )
output.append(_A )
else:
SCREAMING_SNAKE_CASE_ : int = []
for i in range(_A ):
SCREAMING_SNAKE_CASE_ : Tuple = placeholder_token + F'_{i}'
self.try_adding_tokens(_A,*_A,**_A )
output.append(_A )
# reject a new placeholder token that contains an existing placeholder token as a substring, since the two could not be told apart during replacement
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'The tokenizer already has placeholder token {token} that can get confused with'
F' {placeholder_token}; keep placeholder tokens independent' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output
def __UpperCamelCase ( self : Any,_A : List[Any],_A : str=False,_A : List[Any]=1.0 ):
"""simple docstring"""
if isinstance(_A,_A ):
SCREAMING_SNAKE_CASE_ : Dict = []
for i in range(len(_A ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i],vector_shuffle=_A ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
SCREAMING_SNAKE_CASE_ : str = self.token_map[placeholder_token]
SCREAMING_SNAKE_CASE_ : Dict = tokens[: 1 + int(len(_A ) * prop_tokens_to_load )]
if vector_shuffle:
SCREAMING_SNAKE_CASE_ : List[str] = copy.copy(_A )
random.shuffle(_A )
SCREAMING_SNAKE_CASE_ : List[str] = text.replace(_A," ".join(_A ) )
return text
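# e.g. with a hypothetical token_map entry {"<cat>": ["<cat>_0", "<cat>_1"]},
# "a photo of <cat>" becomes "a photo of <cat>_0 <cat>_1" before tokenization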
def __call__( self : List[Any],_A : Any,*_A : List[str],_A : List[str]=False,_A : Dict=1.0,**_A : Dict ):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
_A,vector_shuffle=_A,prop_tokens_to_load=_A ),*_A,**_A,)
def __UpperCamelCase ( self : Any,_A : Tuple,*_A : Dict,_A : str=False,_A : List[str]=1.0,**_A : Union[str, Any] ):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
_A,vector_shuffle=_A,prop_tokens_to_load=_A ),*_A,**_A,)
| 18 | import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : Path , lowerCAmelCase : str = None , lowerCAmelCase : str = None , lowerCAmelCase : str = None , ):
"""simple docstring"""
if config_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = question_encoder_name_or_path
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
SCREAMING_SNAKE_CASE_ : List[Any] = RagConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = AutoConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = gen_config
SCREAMING_SNAKE_CASE_ : Optional[Any] = question_encoder_config
SCREAMING_SNAKE_CASE_ : Dict = model_class.from_pretrained_question_encoder_generator(
lowerCAmelCase , lowerCAmelCase , config=lowerCAmelCase )
rag_model.save_pretrained(lowerCAmelCase )
# Sanity check.
model_class.from_pretrained(lowerCAmelCase )
# Save tokenizers.
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCAmelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
__lowerCamelCase : str = parser.parse_args()
__lowerCamelCase : int = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 18 | 1 |
from __future__ import annotations
# This is the precision for this function, which can be altered.
# It is recommended to keep this number greater than or equal to 10.
__lowerCamelCase : Dict = 10
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : list[int] , lowerCAmelCase : int ):
"""simple docstring"""
for i in range(lowerCAmelCase , lowerCAmelCase ):
if array[i] == target:
return i
return -1
def _snake_case ( lowerCAmelCase : list[int] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : str = len(lowerCAmelCase )
while left <= right:
if right - left < precision:
return lin_search(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = (left + right) // 3 + 1
SCREAMING_SNAKE_CASE_ : Dict = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
SCREAMING_SNAKE_CASE_ : Dict = one_third - 1
elif array[two_third] < target:
SCREAMING_SNAKE_CASE_ : Any = two_third + 1
else:
SCREAMING_SNAKE_CASE_ : List[str] = one_third + 1
SCREAMING_SNAKE_CASE_ : int = two_third - 1
else:
return -1
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : list[int] , lowerCAmelCase : int ):
"""simple docstring"""
if left < right:
if right - left < precision:
return lin_search(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = (left + right) // 3 + 1
SCREAMING_SNAKE_CASE_ : Dict = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowerCAmelCase , one_third - 1 , lowerCAmelCase , lowerCAmelCase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCAmelCase , lowerCAmelCase )
else:
return -1
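# Each step keeps roughly one third of the remaining range, so ternary search takes about
# log3(n) comparisons; once the window is narrower than `precision`, a plain linear scan
# (lin_search above) is cheaper than splitting further.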
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase : Optional[Any] = input('''Enter numbers separated by comma:\n''').strip()
__lowerCamelCase : str = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
__lowerCamelCase : str = int(input('''Enter the number to be found in the list:\n''').strip())
__lowerCamelCase : int = ite_ternary_search(collection, target)
__lowerCamelCase : Tuple = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f'''Iterative search: {target} found at positions: {resulta}''')
print(f'''Recursive search: {target} found at positions: {resulta}''')
else:
print('''Not found''')
| 18 | import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
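# The generic tensor utilities under test dispatch on the input type (numpy / torch / tf / jax),
# so each test checks that the result for a framework tensor matches the plain numpy result.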
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
}
}
SCREAMING_SNAKE_CASE_ : Any = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 128,
"task_specific_params.summarization.min_length": 12,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 142,
"task_specific_params.summarization_cnn.min_length": 56,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 62,
"task_specific_params.summarization_xsum.min_length": 11,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_A ),_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4 )
self.assertTrue(np.allclose(transpose(_A ),x.transpose() ) )
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),x.transpose((1, 2, 0) ) ) )
@require_torch
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A ),transpose(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),transpose(_A,axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A ),np.asarray(transpose(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : List[Any] = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A,axes=(1, 2, 0) ),np.asarray(transpose(_A,axes=(1, 2, 0) ) ) ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.reshape(_A,(4, 3) ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(3,4,5 )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.reshape(_A,(12, 5) ) ) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Dict = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : int = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),reshape(_A,(4, 3) ).numpy() ) )
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Any = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),reshape(_A,(12, 5) ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : int = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(4, 3) ),np.asarray(reshape(_A,(4, 3) ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.random.randn(3,4,5 )
SCREAMING_SNAKE_CASE_ : Tuple = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A,(12, 5) ),np.asarray(reshape(_A,(12, 5) ) ) ) )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(1,3,4 )
self.assertTrue(np.allclose(squeeze(_A ),np.squeeze(_A ) ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.squeeze(_A,axis=2 ) ) )
@require_torch
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A ),squeeze(_A ).numpy() ) )
SCREAMING_SNAKE_CASE_ : Any = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),squeeze(_A,axis=2 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(1,3,4 )
SCREAMING_SNAKE_CASE_ : List[str] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A ),np.asarray(squeeze(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : str = np.random.randn(1,4,1,5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A,axis=2 ),np.asarray(squeeze(_A,axis=2 ) ) ) )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.expand_dims(_A,axis=1 ) ) )
@require_torch
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_tf
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.constant(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),expand_dims(_A,axis=1 ).numpy() ) )
@require_flax
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = np.random.randn(3,4 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.array(_A )
self.assertTrue(np.allclose(expand_dims(_A,axis=1 ),np.asarray(expand_dims(_A,axis=1 ) ) ) )
| 18 | 1 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
__lowerCamelCase : int = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : str ):
"""simple docstring"""
return torch.atana(lowerCAmelCase , lowerCAmelCase ) / math.pi * 2
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = torch.sin(t * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE_ : Optional[int] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCAmelCase , lowerCAmelCase )
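# Crash schedule: sigma = sin(pi * t / 2) ** 2 and alpha = sqrt(1 - sigma ** 2); the helper above
# maps an (alpha, sigma) pair back to a timestep as t = atan2(sigma, alpha) * 2 / pi
# (argument order assumed from the original audio-diffusion code).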
class a__ ( A__ ):
pass
class a__ ( nn.Module ):
def __init__( self : Any,_A : List[Any] ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE_ : int = DiffusionAttnUnetaD(_A,n_attn_layers=4 )
SCREAMING_SNAKE_CASE_ : List[Any] = deepcopy(self.diffusion )
SCREAMING_SNAKE_CASE_ : int = torch.quasirandom.SobolEngine(1,scramble=_A )
def _snake_case ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = MODELS_MAP[model_name]["url"]
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
__lowerCamelCase : List[Any] = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
__lowerCamelCase : int = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
__lowerCamelCase : List[Any] = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
__lowerCamelCase : Optional[int] = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
__lowerCamelCase : Tuple = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
__lowerCamelCase : Any = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
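# The dictionaries above translate the original flat `main.N` layer numbering of the
# audio-diffusion UNet into diffusers' down_blocks / mid_block / up_blocks naming.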
def _snake_case ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if name.startswith("skip" ):
return name.replace("skip" , RES_CONV_MAP["skip"] )
# name has to be of the format main.{digit}
if not name.startswith("main." ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def _snake_case ( lowerCAmelCase : Any ):
"""simple docstring"""
for key, value in ATTN_MAP.items():
if name.startswith(lowerCAmelCase ) and not isinstance(lowerCAmelCase , lowerCAmelCase ):
return name.replace(lowerCAmelCase , lowerCAmelCase )
elif name.startswith(lowerCAmelCase ):
return [name.replace(lowerCAmelCase , lowerCAmelCase ) for v in value]
raise ValueError(f'Attn error with {name}' )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : str=1_3 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = input_string
if string.split("." )[0] == "timestep_embed":
return string.replace("timestep_embed" , "time_proj" )
SCREAMING_SNAKE_CASE_ : Tuple = 0
if string.startswith("net.3." ):
depth += 1
SCREAMING_SNAKE_CASE_ : Dict = string[6:]
elif string.startswith("net." ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = string[4:]
while string.startswith("main.7." ):
depth += 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = string[7:]
if string.startswith("main." ):
SCREAMING_SNAKE_CASE_ : Tuple = string[5:]
# mid block
if string[:2].isdigit():
SCREAMING_SNAKE_CASE_ : Any = string[:2]
SCREAMING_SNAKE_CASE_ : Optional[int] = string[2:]
else:
SCREAMING_SNAKE_CASE_ : int = string[0]
SCREAMING_SNAKE_CASE_ : Dict = string[1:]
if depth == max_depth:
SCREAMING_SNAKE_CASE_ : List[str] = MID_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE_ : Tuple = "mid_block"
elif depth > 0 and int(lowerCAmelCase ) < 7:
SCREAMING_SNAKE_CASE_ : Dict = DOWN_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE_ : Optional[Any] = f'down_blocks.{depth}'
elif depth > 0 and int(lowerCAmelCase ) > 7:
SCREAMING_SNAKE_CASE_ : List[Any] = UP_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
SCREAMING_SNAKE_CASE_ : Tuple = DEPTH_0_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE_ : int = f'up_blocks.{max_depth - 1}' if int(lowerCAmelCase ) > 3 else "down_blocks.0"
if not string_left.startswith("." ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
SCREAMING_SNAKE_CASE_ : List[str] = string_left[1:]
if "resnets" in new_layer:
SCREAMING_SNAKE_CASE_ : str = convert_resconv_naming(lowerCAmelCase )
elif "attentions" in new_layer:
SCREAMING_SNAKE_CASE_ : str = convert_attn_naming(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = new_string_left
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = prefix + "." + new_layer + "." + string_left
else:
SCREAMING_SNAKE_CASE_ : List[str] = [prefix + "." + new_layer + "." + s for s in string_left]
return new_string
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = {}
for k, v in state_dict.items():
if k.endswith("kernel" ):
# up- and downsample layers don't have trainable weights
continue
SCREAMING_SNAKE_CASE_ : List[Any] = rename(lowerCAmelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = transform_conv_attns(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = v
return new_state_dict
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if len(lowerCAmelCase ) == 1:
if len(v.shape ) == 3:
# weight
SCREAMING_SNAKE_CASE_ : str = v[:, :, 0]
else:
# bias
SCREAMING_SNAKE_CASE_ : Optional[int] = v
else:
# qkv matrices
SCREAMING_SNAKE_CASE_ : List[Any] = v.shape[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
SCREAMING_SNAKE_CASE_ : int = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def _snake_case ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
SCREAMING_SNAKE_CASE_ : List[str] = args.model_path.split("/" )[-1].split("." )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
SCREAMING_SNAKE_CASE_ : str = download(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = MODELS_MAP[model_name]["sample_rate"]
SCREAMING_SNAKE_CASE_ : List[Any] = MODELS_MAP[model_name]["sample_size"]
SCREAMING_SNAKE_CASE_ : str = Object()
SCREAMING_SNAKE_CASE_ : Any = sample_size
SCREAMING_SNAKE_CASE_ : Dict = sample_rate
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Any = UNetaDModel(sample_size=lowerCAmelCase , sample_rate=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = diffusers_model.state_dict()
SCREAMING_SNAKE_CASE_ : Optional[Any] = DiffusionUncond(lowerCAmelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCAmelCase )["state_dict"] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = orig_model.diffusion_ema.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = orig_model.state_dict()
SCREAMING_SNAKE_CASE_ : int = rename_orig_weights(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
SCREAMING_SNAKE_CASE_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCAmelCase ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("kernel" ) for k in list(lowerCAmelCase ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
SCREAMING_SNAKE_CASE_ : int = value.squeeze()
SCREAMING_SNAKE_CASE_ : Optional[int] = value
diffusers_model.load_state_dict(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = 1_0_0
SCREAMING_SNAKE_CASE_ : List[Any] = 3_3
SCREAMING_SNAKE_CASE_ : Union[str, Any] = IPNDMScheduler(num_train_timesteps=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = torch.randn([1, 2, config.sample_size] , generator=lowerCAmelCase ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.linspace(1 , 0 , steps + 1 , device=lowerCAmelCase )[:-1]
SCREAMING_SNAKE_CASE_ : Dict = get_crash_schedule(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = DanceDiffusionPipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple = torch.manual_seed(3_3 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe(num_inference_steps=lowerCAmelCase , generator=lowerCAmelCase ).audios
SCREAMING_SNAKE_CASE_ : Optional[int] = sampling.iplms_sample(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , {} )
SCREAMING_SNAKE_CASE_ : List[str] = generated.clamp(-1 , 1 )
SCREAMING_SNAKE_CASE_ : str = (generated - audio).abs().sum()
SCREAMING_SNAKE_CASE_ : Dict = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("Diff sum" , lowerCAmelCase )
print("Diff max" , lowerCAmelCase )
assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCamelCase : Union[str, Any] = parser.parse_args()
main(args)
| 18 | import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__lowerCamelCase : List[Any] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__lowerCamelCase : int = {
'''allenai/longformer-base-4096''': 40_96,
'''allenai/longformer-large-4096''': 40_96,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 40_96,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 40_96,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
SCREAMING_SNAKE_CASE_ : str = bs[:]
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCAmelCase )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE_ : List[str] = [chr(lowerCAmelCase ) for n in cs]
return dict(zip(lowerCAmelCase , lowerCAmelCase ) )
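# e.g. printable bytes map to themselves ("A" stays "A"), while the space byte (32) is
# remapped to chr(288) == "Ġ", which is why GPT-2-style vocabularies render leading
# spaces as "Ġ".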
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = set()
SCREAMING_SNAKE_CASE_ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ : List[str] = char
return pairs
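# e.g. get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}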
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any],_A : List[Any],_A : Tuple,_A : str="replace",_A : Optional[int]="<s>",_A : Dict="</s>",_A : Any="</s>",_A : Optional[Any]="<s>",_A : Union[str, Any]="<unk>",_A : int="<pad>",_A : Dict="<mask>",_A : int=False,**_A : Dict,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else bos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else eos_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else sep_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else cls_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else unk_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_A,lstrip=_A,rstrip=_A ) if isinstance(_A,_A ) else mask_token
super().__init__(
errors=_A,bos_token=_A,eos_token=_A,unk_token=_A,sep_token=_A,cls_token=_A,pad_token=_A,mask_token=_A,add_prefix_space=_A,**_A,)
with open(_A,encoding="utf-8" ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : Tuple = json.load(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : Any = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_A,encoding="utf-8" ) as merges_handle:
SCREAMING_SNAKE_CASE_ : int = merges_handle.read().split("\n" )[1:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Optional[int] = dict(zip(_A,range(len(_A ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = {}
SCREAMING_SNAKE_CASE_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.encoder )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return dict(self.encoder,**self.added_tokens_encoder )
def __UpperCamelCase ( self : Any,_A : int ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tuple(_A )
SCREAMING_SNAKE_CASE_ : str = get_pairs(_A )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : Tuple = min(_A,key=lambda _A : self.bpe_ranks.get(_A,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = bigram
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Dict = 0
while i < len(_A ):
try:
SCREAMING_SNAKE_CASE_ : Tuple = word.index(_A,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : str = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : Dict = tuple(_A )
SCREAMING_SNAKE_CASE_ : List[str] = new_word
if len(_A ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_pairs(_A )
SCREAMING_SNAKE_CASE_ : List[str] = " ".join(_A )
SCREAMING_SNAKE_CASE_ : Any = word
return word
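# e.g. assuming hypothetical merge ranks {("l", "o"): 0, ("lo", "w"): 1}, bpe("lower")
# merges l+o -> "lo", then lo+w -> "low", and returns "low e r"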
def __UpperCamelCase ( self : Dict,_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for token in re.findall(self.pat,_A ):
SCREAMING_SNAKE_CASE_ : Any = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(" " ) )
return bpe_tokens
def __UpperCamelCase ( self : Optional[int],_A : str ):
"""simple docstring"""
return self.encoder.get(_A,self.encoder.get(self.unk_token ) )
def __UpperCamelCase ( self : Tuple,_A : str ):
"""simple docstring"""
return self.decoder.get(_A )
def __UpperCamelCase ( self : List[str],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = "".join(_A )
SCREAMING_SNAKE_CASE_ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8",errors=self.errors )
return text
def __UpperCamelCase ( self : List[Any],_A : str,_A : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Tuple = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(
_A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_A,"w",encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder,indent=2,sort_keys=_A,ensure_ascii=_A ) + "\n" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
with open(_A,"w",encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(),key=lambda _A : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
" Please check that the tokenizer is not corrupted!" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = token_index
writer.write(" ".join(_A ) + "\n" )
index += 1
return vocab_file, merge_file
def __UpperCamelCase ( self : Optional[Any],_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : str = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None,_A : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A,token_ids_a=_A,already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __UpperCamelCase ( self : Any,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Any,_A : Union[str, Any],_A : Any=False,**_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop("add_prefix_space",self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : str = " " + text
return (text, kwargs)
| 18 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests often fail with an OOM error on GPU.
# This makes JAX allocate exactly what is needed on demand and deallocate memory that is no longer needed,
# but it will be slower, as explained here: https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__lowerCamelCase : Optional[int] = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def _snake_case ( lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : int=None , lowerCAmelCase : str=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Any=None , lowerCAmelCase : Union[str, Any]=None , ):
"""simple docstring"""
if attention_mask is None:
SCREAMING_SNAKE_CASE_ : int = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
SCREAMING_SNAKE_CASE_ : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE_ : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE_ : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) )
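# padding-derived masks are 1 on real tokens and 0 on padding; head masks default to all ones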
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class a__ :
def __init__( self : Optional[int],_A : str,_A : int=13,_A : Union[str, Any]=7,_A : List[Any]=True,_A : Optional[int]=False,_A : Tuple=99,_A : Optional[int]=16,_A : List[str]=2,_A : int=4,_A : Any=4,_A : Any="gelu",_A : Union[str, Any]=0.1,_A : List[Any]=0.1,_A : Union[str, Any]=32,_A : List[str]=2,_A : Tuple=1,_A : Optional[int]=0,_A : List[Any]=0.02,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : List[Any] = seq_length
SCREAMING_SNAKE_CASE_ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE_ : int = use_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : int = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Tuple = eos_token_id
SCREAMING_SNAKE_CASE_ : int = pad_token_id
SCREAMING_SNAKE_CASE_ : Any = bos_token_id
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1],self.vocab_size ),3,self.vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1),dtype=np.intaa )),-1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = shift_tokens_right(_A,1,2 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size,d_model=self.hidden_size,encoder_layers=self.num_hidden_layers,decoder_layers=self.num_hidden_layers,encoder_attention_heads=self.num_attention_heads,decoder_attention_heads=self.num_attention_heads,encoder_ffn_dim=self.intermediate_size,decoder_ffn_dim=self.intermediate_size,dropout=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,eos_token_id=self.eos_token_id,bos_token_id=self.bos_token_id,pad_token_id=self.pad_token_id,initializer_range=self.initializer_range,use_cache=_A,)
SCREAMING_SNAKE_CASE_ : List[Any] = prepare_blenderbot_inputs_dict(_A,_A,_A )
return config, inputs_dict
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : Tuple,_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 20
SCREAMING_SNAKE_CASE_ : Any = model_class_name(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = model.encode(inputs_dict["input_ids"] )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
SCREAMING_SNAKE_CASE_ : int = model.init_cache(decoder_input_ids.shape[0],_A,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length),dtype="i4" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :],(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),)
SCREAMING_SNAKE_CASE_ : Dict = model.decode(
decoder_input_ids[:, :-1],_A,decoder_attention_mask=_A,past_key_values=_A,decoder_position_ids=_A,)
SCREAMING_SNAKE_CASE_ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]],dtype="i4" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:],_A,decoder_attention_mask=_A,past_key_values=outputs_cache.past_key_values,decoder_position_ids=_A,)
SCREAMING_SNAKE_CASE_ : Optional[int] = model.decode(_A,_A )
SCREAMING_SNAKE_CASE_ : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3,msg=F'Max diff is {diff}' )
def __UpperCamelCase ( self : Union[str, Any],_A : int,_A : Tuple,_A : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = 20
SCREAMING_SNAKE_CASE_ : int = model_class_name(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model.encode(inputs_dict["input_ids"] )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
SCREAMING_SNAKE_CASE_ : Dict = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
],axis=-1,)
SCREAMING_SNAKE_CASE_ : Optional[int] = model.init_cache(decoder_input_ids.shape[0],_A,_A )
SCREAMING_SNAKE_CASE_ : Any = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :],(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),)
SCREAMING_SNAKE_CASE_ : Dict = model.decode(
decoder_input_ids[:, :-1],_A,decoder_attention_mask=_A,past_key_values=_A,decoder_position_ids=_A,)
SCREAMING_SNAKE_CASE_ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]],dtype="i4" )
SCREAMING_SNAKE_CASE_ : int = model.decode(
decoder_input_ids[:, -1:],_A,past_key_values=outputs_cache.past_key_values,decoder_attention_mask=_A,decoder_position_ids=_A,)
SCREAMING_SNAKE_CASE_ : Tuple = model.decode(_A,_A,decoder_attention_mask=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3,msg=F'Max diff is {diff}' )
@require_flax
class a__ ( unittest.TestCase ):
A = 99
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
],dtype=np.intaa,)
SCREAMING_SNAKE_CASE_ : Any = input_ids.shape[0]
SCREAMING_SNAKE_CASE_ : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size,d_model=24,encoder_layers=2,decoder_layers=2,encoder_attention_heads=2,decoder_attention_heads=2,encoder_ffn_dim=32,decoder_ffn_dim=32,max_position_embeddings=48,eos_token_id=2,pad_token_id=1,bos_token_id=0,)
return config, input_ids, batch_size
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self._get_config_and_data()
SCREAMING_SNAKE_CASE_ : Dict = FlaxBlenderbotSmallForConditionalGeneration(_A )
SCREAMING_SNAKE_CASE_ : Dict = lm_model(input_ids=_A )
SCREAMING_SNAKE_CASE_ : List[Any] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape,_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BlenderbotSmallConfig(
vocab_size=self.vocab_size,d_model=14,encoder_layers=2,decoder_layers=2,encoder_attention_heads=2,decoder_attention_heads=2,encoder_ffn_dim=8,decoder_ffn_dim=8,max_position_embeddings=48,)
SCREAMING_SNAKE_CASE_ : Dict = FlaxBlenderbotSmallForConditionalGeneration(_A )
SCREAMING_SNAKE_CASE_ : int = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]],dtype=np.intaa )
SCREAMING_SNAKE_CASE_ : Tuple = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]],dtype=np.intaa )
SCREAMING_SNAKE_CASE_ : Any = lm_model(input_ids=_A,decoder_input_ids=_A )
SCREAMING_SNAKE_CASE_ : Tuple = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape,_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]],dtype=np.intaa )
SCREAMING_SNAKE_CASE_ : Optional[int] = shift_tokens_right(_A,1,2 )
SCREAMING_SNAKE_CASE_ : int = np.equal(_A,1 ).astype(np.floataa ).sum()
SCREAMING_SNAKE_CASE_ : Optional[int] = np.equal(_A,1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape,input_ids.shape )
self.assertEqual(_A,n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0],2 ).all() )
@require_flax
class a__ ( A__ , unittest.TestCase , A__ ):
A = True
A = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
A = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = FlaxBlenderbotSmallModelTester(self )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_A,_A,_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_A,_A,_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model_class(_A )
@jax.jit
def encode_jitted(_A : Dict,_A : Optional[int]=None,**_A : int ):
return model.encode(input_ids=_A,attention_mask=_A )
with self.subTest("JIT Enabled" ):
SCREAMING_SNAKE_CASE_ : Tuple = encode_jitted(**_A ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ : Dict = encode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ),len(_A ) )
for jitted_output, output in zip(_A,_A ):
self.assertEqual(jitted_output.shape,output.shape )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ : str = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model.encode(inputs_dict["input_ids"],inputs_dict["attention_mask"] )
SCREAMING_SNAKE_CASE_ : Dict = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(_A : str,_A : Any,_A : int ):
return model.decode(
decoder_input_ids=_A,decoder_attention_mask=_A,encoder_outputs=_A,)
with self.subTest("JIT Enabled" ):
SCREAMING_SNAKE_CASE_ : Tuple = decode_jitted(**_A ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ : Optional[Any] = decode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ),len(_A ) )
for jitted_output, output in zip(_A,_A ):
self.assertEqual(jitted_output.shape,output.shape )
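    # The paired runs above (JIT enabled vs. `jax.disable_jit()`) guard against
    # tracing-induced drift: the jitted and eager paths must agree on the number
    # and shapes of outputs, which is the cheapest check that compilation did
    # not silently change the computation.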
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Dict = model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
SCREAMING_SNAKE_CASE_ : str = np.ones((1, 1) ) * model.config.eos_token_id
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.assertIsNotNone(_A )
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
def __init__( self : Optional[int],_A : Dict,_A : List[str]=13,_A : List[str]=7,_A : int=True,_A : str=True,_A : Union[str, Any]=True,_A : Tuple=True,_A : Dict=99,_A : Tuple=32,_A : Tuple=2,_A : Tuple=4,_A : Optional[Any]=37,_A : str="gelu",_A : Dict=0.1,_A : List[Any]=0.1,_A : List[str]=512,_A : str=16,_A : int=2,_A : Dict=0.02,_A : List[Any]=3,_A : Optional[Any]=4,_A : Optional[int]=None,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Any = 13
SCREAMING_SNAKE_CASE_ : List[str] = 7
SCREAMING_SNAKE_CASE_ : Dict = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Tuple = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = 99
SCREAMING_SNAKE_CASE_ : Tuple = 384
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Any = 4
SCREAMING_SNAKE_CASE_ : str = 37
SCREAMING_SNAKE_CASE_ : Optional[Any] = "gelu"
SCREAMING_SNAKE_CASE_ : List[Any] = 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE_ : Dict = 512
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : Optional[int] = 2
SCREAMING_SNAKE_CASE_ : Any = 0.02
SCREAMING_SNAKE_CASE_ : str = 3
SCREAMING_SNAKE_CASE_ : int = 4
SCREAMING_SNAKE_CASE_ : Dict = 128
SCREAMING_SNAKE_CASE_ : Any = 2
SCREAMING_SNAKE_CASE_ : Tuple = 9
SCREAMING_SNAKE_CASE_ : List[Any] = 1
SCREAMING_SNAKE_CASE_ : Any = None
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : List[str] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Any = ConvBertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_range=self.initializer_range,return_dict=_A,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Optional[int],_A : List[Any],_A : int,_A : Tuple,_A : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertModel(config=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
SCREAMING_SNAKE_CASE_ : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
SCREAMING_SNAKE_CASE_ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Dict,_A : int,_A : Union[str, Any],_A : List[Any],_A : int,_A : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = TFConvBertForMaskedLM(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Any,_A : Optional[int],_A : List[Any],_A : Union[str, Any],_A : List[Any],_A : Union[str, Any],_A : Optional[int],_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = TFConvBertForSequenceClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int,_A : int,_A : Dict,_A : List[str],_A : Tuple,_A : Dict,_A : Optional[int],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Any = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.tile(tf.expand_dims(_A,1 ),(1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ : int = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ : int = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : str,_A : str,_A : Tuple,_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : str = model(_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : int,_A : List[str],_A : List[Any],_A : Any,_A : Optional[int],_A : List[str],_A : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TFConvBertForQuestionAnswering(config=_A )
SCREAMING_SNAKE_CASE_ : Dict = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
SCREAMING_SNAKE_CASE_ : Any = model(_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) , (
SCREAMING_SNAKE_CASE_
) ,
) : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE_ : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A = False
A = False
A = False
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(self,config_class=_A,hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Any = True
if hasattr(_A,"use_cache" ):
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(self.model_tester,"key_length",_A )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[str] = self._prepare_for_class(_A,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A,saved_model=_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(_A,"saved_model","1" )
SCREAMING_SNAKE_CASE_ : Tuple = tf.keras.models.load_model(_A )
SCREAMING_SNAKE_CASE_ : str = model(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs["encoder_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = outputs["encoder_attentions"]
else:
SCREAMING_SNAKE_CASE_ : Any = outputs["hidden_states"]
SCREAMING_SNAKE_CASE_ : List[str] = outputs["attentions"]
self.assertEqual(len(_A ),_A )
SCREAMING_SNAKE_CASE_ : Any = getattr(
self.model_tester,"expected_num_hidden_layers",self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ),_A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ),[self.model_tester.seq_length, self.model_tester.hidden_size],)
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
@slow
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : List[str] = getattr(self.model_tester,"decoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Any = getattr(self.model_tester,"encoder_seq_length",self.model_tester.seq_length )
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(self.model_tester,"key_length",_A )
SCREAMING_SNAKE_CASE_ : int = getattr(self.model_tester,"key_length",_A )
def check_decoder_attentions_output(_A : Dict ):
SCREAMING_SNAKE_CASE_ : int = len(_A )
self.assertEqual(out_len % 2,0 )
SCREAMING_SNAKE_CASE_ : Tuple = outputs.decoder_attentions
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],)
def check_encoder_attentions_output(_A : Tuple ):
SCREAMING_SNAKE_CASE_ : int = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ),self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ),[self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],)
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_A )
SCREAMING_SNAKE_CASE_ : Any = model(self._prepare_for_class(_A,_A ) )
SCREAMING_SNAKE_CASE_ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(_A )
SCREAMING_SNAKE_CASE_ : int = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = model_class(_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Dict = model_class(_A )
SCREAMING_SNAKE_CASE_ : str = model(self._prepare_for_class(_A,_A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1),len(_A ) )
self.assertEqual(model.config.output_hidden_states,_A )
check_encoder_attentions_output(_A )
@require_tf
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
SCREAMING_SNAKE_CASE_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : Tuple = model(_A )[0]
SCREAMING_SNAKE_CASE_ : List[Any] = [1, 6, 768]
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3],_A,atol=1E-4 )
import os
def _snake_case ( lowerCAmelCase : str = "matrix.txt" ):
"""simple docstring"""
with open(os.path.join(os.path.dirname(lowerCAmelCase ) , lowerCAmelCase ) ) as in_file:
SCREAMING_SNAKE_CASE_ : Dict = in_file.read()
SCREAMING_SNAKE_CASE_ : int = [[int(lowerCAmelCase ) for cell in row.split("," )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(grid[0] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[0 for i in range(lowerCAmelCase )] for j in range(lowerCAmelCase )]
SCREAMING_SNAKE_CASE_ : Dict = grid[0][0]
for i in range(1 , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = grid[0][i] + dp[0][i - 1]
for i in range(1 , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = grid[i][0] + dp[i - 1][0]
for i in range(1 , lowerCAmelCase ):
for j in range(1 , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
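# Illustrative stand-alone version of the same DP, without the file I/O, plus a
# tiny check: on [[1, 3], [2, 4]] the cheapest right/down path is 1 -> 2 -> 4 = 7.
# Names here are sketch-only assumptions, not part of the original script.
def _min_path_sum_sketch(grid):
    dp = [row[:] for row in grid]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            if i == 0 and j == 0:
                continue
            up = dp[i - 1][j] if i > 0 else float("inf")
            left = dp[i][j - 1] if j > 0 else float("inf")
            dp[i][j] += min(up, left)
    return dp[-1][-1]

assert _min_path_sum_sketch([[1, 3], [2, 4]]) == 7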
if __name__ == "__main__":
print(f'''{solution() = }''')
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = int(lowerCAmelCase )
if decimal in (0, 1): # Exit cases for the recursion
return str(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = divmod(lowerCAmelCase , 2 )
return binary_recursive(lowerCAmelCase ) + str(lowerCAmelCase )
def _snake_case ( lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCAmelCase ).strip()
if not number:
raise ValueError("No input value was provided" )
SCREAMING_SNAKE_CASE_ : List[str] = "-" if number.startswith("-" ) else ""
SCREAMING_SNAKE_CASE_ : Optional[Any] = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'{negative}0b{binary_recursive(int(lowerCAmelCase ) )}'
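# Self-contained sketch of the same recursion, cross-checked against Python's
# built-in formatter (the sketch names are illustrative only):
def _to_binary_sketch(n: int) -> str:
    if n in (0, 1):
        return str(n)
    div, mod = divmod(n, 2)
    return _to_binary_sketch(div) + str(mod)

for _n in (1, 2, 40, 255):
    assert _to_binary_sketch(_n) == bin(_n)[2:]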
if __name__ == "__main__":
from doctest import testmod
testmod()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
__lowerCamelCase : Any = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
__lowerCamelCase : Union[str, Any] = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class a__ ( A__ ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = PRETRAINED_INIT_CONFIGURATION
A = RoFormerTokenizer
def __init__( self : List[str],_A : int=None,_A : int=None,_A : int=True,_A : List[Any]="[UNK]",_A : Tuple="[SEP]",_A : List[Any]="[PAD]",_A : Optional[int]="[CLS]",_A : Optional[Any]="[MASK]",_A : Optional[int]=True,_A : List[str]=None,**_A : List[Any],):
"""simple docstring"""
super().__init__(
_A,tokenizer_file=_A,do_lower_case=_A,unk_token=_A,sep_token=_A,pad_token=_A,cls_token=_A,mask_token=_A,tokenize_chinese_chars=_A,strip_accents=_A,**_A,)
SCREAMING_SNAKE_CASE_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("lowercase",_A ) != do_lower_case
or pre_tok_state.get("strip_accents",_A ) != strip_accents
):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,pre_tok_state.pop("type" ) )
SCREAMING_SNAKE_CASE_ : Any = do_lower_case
SCREAMING_SNAKE_CASE_ : List[str] = strip_accents
SCREAMING_SNAKE_CASE_ : str = pre_tok_class(**_A )
SCREAMING_SNAKE_CASE_ : List[str] = do_lower_case
def __getstate__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BertPreTokenizer()
return state
def __setstate__( self : List[Any],_A : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = d
SCREAMING_SNAKE_CASE_ : List[str] = self.__dict__["_tokenizer"].get_vocab()
SCREAMING_SNAKE_CASE_ : Any = PreTokenizer.custom(JiebaPreTokenizer(_A ) )
def __UpperCamelCase ( self : Union[str, Any],_A : List[Any],_A : str=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : str,_A : List[int],_A : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : int,_A : str,_A : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._tokenizer.model.save(_A,name=_A )
return tuple(_A )
def __UpperCamelCase ( self : int,_A : Optional[int],_A : List[Any]=None,_A : Tuple=None,_A : str=False,**_A : List[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BertPreTokenizer()
return super().save_pretrained(_A,_A,_A,_A,**_A )
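# Hypothetical usage sketch (in the released module this class is
# RoFormerTokenizerFast; the checkpoint name comes from the maps above):
#
#   from transformers import RoFormerTokenizerFast
#   tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   print(tok.tokenize("今天天气非常好。"))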
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase : Union[str, Any] = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Any = ['''ChineseCLIPFeatureExtractor''']
__lowerCamelCase : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def _snake_case ( lowerCAmelCase : int = 1_0_0_0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 3
SCREAMING_SNAKE_CASE_ : List[str] = 0
while a < n:
        if a % 3 == 0 or a % 5 == 0:  # multiples of 15 are already covered here
            result += a
a += 1
return result
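# The loop can be cross-checked with inclusion-exclusion: the sum of multiples
# of 3 or 5 below n is S(3) + S(5) - S(15) with S(k) = k * m * (m + 1) // 2 and
# m = (n - 1) // k. A sketch (names are illustrative):
def _solution_closed_form(n: int = 1_0_0_0) -> int:
    def s(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return s(3) + s(5) - s(15)

assert _solution_closed_form() == 233_168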
if __name__ == "__main__":
print(f'''{solution() = }''')
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__lowerCamelCase : Any = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = list(s_dict.keys() )
for key in keys:
SCREAMING_SNAKE_CASE_ : int = R".*/layers_(\d+)"
SCREAMING_SNAKE_CASE_ : List[Any] = key
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"layers_(\d+)" , R"block/\1/layer" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = R"(encoder|decoder)\/"
if re.match(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : str = re.match(lowerCAmelCase , lowerCAmelCase ).groups()
if groups[0] == "encoder":
SCREAMING_SNAKE_CASE_ : Any = re.sub(R"/mlp/" , R"/1/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , lowerCAmelCase )
elif groups[0] == "decoder":
SCREAMING_SNAKE_CASE_ : List[str] = re.sub(R"/mlp/" , R"/2/mlp/" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , lowerCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
SCREAMING_SNAKE_CASE_ : List[Any] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
print(f'{key} -> {new_key}' )
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict.pop(lowerCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : str = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
SCREAMING_SNAKE_CASE_ : Optional[int] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = s_dict[key].shape[0]
SCREAMING_SNAKE_CASE_ : List[Any] = s_dict[key]
for idx in range(lowerCAmelCase ):
            SCREAMING_SNAKE_CASE_ : Tuple = expert_weights[idx]
            print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
s_dict.pop(lowerCAmelCase )
return s_dict
__lowerCamelCase : List[Any] = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
import regex as re
with open(lowerCAmelCase , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = f.read()
SCREAMING_SNAKE_CASE_ : List[str] = re.findall(R"(.*) = ([0-9.]*)" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
SCREAMING_SNAKE_CASE_ : int = float(lowerCAmelCase ) if "." in value else int(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.findall(R"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_ : List[str] = str(activation[1] )
SCREAMING_SNAKE_CASE_ : str = num_experts
SCREAMING_SNAKE_CASE_ : Tuple = SwitchTransformersConfig(**lowerCAmelCase )
return config
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str=None , lowerCAmelCase : Optional[Any]="./" , lowerCAmelCase : Dict=8 ):
"""simple docstring"""
print(f'Loading flax weights from : {flax_checkpoint_path}' )
    SCREAMING_SNAKE_CASE_ : int = checkpoints.load_t5x_checkpoint(lowerCAmelCase )
if gin_file is not None:
SCREAMING_SNAKE_CASE_ : int = convert_gin_to_config(lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ : Dict = SwitchTransformersConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : str = SwitchTransformersForConditionalGeneration(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = flax_params["target"]
SCREAMING_SNAKE_CASE_ : List[str] = flatten_dict(lowerCAmelCase , sep="/" )
SCREAMING_SNAKE_CASE_ : List[str] = rename_keys(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = unflatten_dict(lowerCAmelCase , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCAmelCase , lowerCAmelCase )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
__lowerCamelCase : Any = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
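    # Example invocation, assuming this file keeps its usual name in the
    # transformers repo; all paths below are placeholders:
    #
    #   python convert_switch_transformers_original_flax_checkpoint_to_pytorch.py \
    #       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
    #       --gin_file /path/to/operative_config.gin \
    #       --pytorch_dump_folder_path ./switch-base-8 \
    #       --num_experts 8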
import os
def _snake_case ( ):
"""simple docstring"""
with open(os.path.dirname(lowerCAmelCase ) + "/p022_names.txt" ) as file:
SCREAMING_SNAKE_CASE_ : List[str] = str(file.readlines()[0] )
SCREAMING_SNAKE_CASE_ : Any = names.replace("\"" , "" ).split("," )
names.sort()
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for i, name in enumerate(lowerCAmelCase ):
for letter in name:
name_score += ord(lowerCAmelCase ) - 6_4
total_score += (i + 1) * name_score
SCREAMING_SNAKE_CASE_ : Tuple = 0
return total_score
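# Worked example from the classic statement of this problem: COLIN scores
# 3 + 15 + 12 + 9 + 14 = 53, and at 1-based position 938 in the sorted list it
# contributes 938 * 53 = 49714 to the total.
assert sum(ord(c) - 6_4 for c in "COLIN") == 53
assert 938 * 53 == 49_714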
if __name__ == "__main__":
print(solution())
from math import factorial, radians
def _snake_case ( lowerCAmelCase : float , lowerCAmelCase : int = 1_8 , lowerCAmelCase : int = 1_0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
SCREAMING_SNAKE_CASE_ : Tuple = radians(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = angle_in_radians
SCREAMING_SNAKE_CASE_ : List[str] = 3
SCREAMING_SNAKE_CASE_ : str = -1
for _ in range(lowerCAmelCase ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase , lowerCAmelCase )
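# Quick sanity check: with the default 18 terms the truncated Maclaurin series
# should track math.sin to well below the 10-decimal rounding used above. The
# function is called here under the name it carries in this dump.
import math

for _deg in (0.0, 30.0, 90.0, 180.0):
    assert abs(_snake_case(_deg) - math.sin(math.radians(_deg))) < 1E-8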
if __name__ == "__main__":
__import__('''doctest''').testmod()
class a__ :
def __init__( self : Optional[Any],_A : str,_A : List[str],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : Any = graph
self._normalize_graph(_A,_A )
SCREAMING_SNAKE_CASE_ : str = len(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
def __UpperCamelCase ( self : List[Any],_A : int,_A : Any ):
"""simple docstring"""
        if isinstance(sources,int ):
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = [sources]
        if isinstance(sinks,int ):
            SCREAMING_SNAKE_CASE_ : List[str] = [sinks]
if len(_A ) == 0 or len(_A ) == 0:
return
SCREAMING_SNAKE_CASE_ : List[str] = sources[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_A ) > 1 or len(_A ) > 1:
SCREAMING_SNAKE_CASE_ : List[str] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
SCREAMING_SNAKE_CASE_ : Optional[int] = len(self.graph ) + 1
for room in self.graph:
room.insert(0,0 )
self.graph.insert(0,[0] * size )
for i in sources:
SCREAMING_SNAKE_CASE_ : int = max_input_flow
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
SCREAMING_SNAKE_CASE_ : str = max_input_flow
SCREAMING_SNAKE_CASE_ : Dict = size - 1
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCamelCase ( self : List[str],_A : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = algorithm(self )
class a__ :
def __init__( self : Union[str, Any],_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = flow_network
SCREAMING_SNAKE_CASE_ : str = flow_network.verticesCount
SCREAMING_SNAKE_CASE_ : Dict = flow_network.sourceIndex
SCREAMING_SNAKE_CASE_ : List[str] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
SCREAMING_SNAKE_CASE_ : Optional[int] = flow_network.graph
SCREAMING_SNAKE_CASE_ : List[Any] = False
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
if not self.executed:
self._algorithm()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
class a__ ( A__ ):
def __init__( self : int,_A : Union[str, Any] ):
"""simple docstring"""
super().__init__(_A )
# use this to save your result
SCREAMING_SNAKE_CASE_ : Union[str, Any] = -1
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class a__ ( A__ ):
def __init__( self : List[str],_A : int ):
"""simple docstring"""
super().__init__(_A )
SCREAMING_SNAKE_CASE_ : int = [[0] * self.verticies_count for i in range(self.verticies_count )]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [0] * self.verticies_count
SCREAMING_SNAKE_CASE_ : Tuple = [0] * self.verticies_count
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
SCREAMING_SNAKE_CASE_ : List[str] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
while i < len(_A ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = vertices_list[i]
SCREAMING_SNAKE_CASE_ : Dict = self.heights[vertex_index]
self.process_vertex(_A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0,vertices_list.pop(_A ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
else:
i += 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sum(self.preflow[self.source_index] )
def __UpperCamelCase ( self : Union[str, Any],_A : Optional[int] ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_A,_A )
self.relabel(_A )
def __UpperCamelCase ( self : str,_A : Union[str, Any],_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = min(
self.excesses[from_index],self.graph[from_index][to_index] - self.preflow[from_index][to_index],)
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCamelCase ( self : Tuple,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
SCREAMING_SNAKE_CASE_ : Any = self.heights[to_index]
if min_height is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = min_height + 1
if __name__ == "__main__":
__lowerCamelCase : int = [0]
__lowerCamelCase : str = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCamelCase : Any = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCamelCase : List[str] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCamelCase : str = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''')
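    # For this sample graph the only source-to-sink route is 0 -> 1 -> 2 -> 3
    # with capacities 7, 6 and 8, so the expected maximum flow is 6; the check
    # below assumes the push-relabel implementation above is faithful.
    assert maximum_flow == 6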
from functools import lru_cache
@lru_cache
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
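# Self-contained sketch showing the memoisation paying off (names here are
# illustrative, not part of the original helper):
from functools import lru_cache as _lru_cache_sketch

@_lru_cache_sketch
def _factorial_sketch(n: int) -> int:
    return 1 if n in (0, 1) else n * _factorial_sketch(n - 1)

assert _factorial_sketch(5) == 120
_factorial_sketch(4)  # already cached by the call above
assert _factorial_sketch.cache_info().hits >= 1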
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 1.5
SCREAMING_SNAKE_CASE_ : Union[str, Any] = int(factor * num_class_images )
SCREAMING_SNAKE_CASE_ : Dict = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase , aesthetic_weight=0.1 )
os.makedirs(f'{class_data_dir}/images' , exist_ok=lowerCAmelCase )
if len(list(Path(f'{class_data_dir}/images' ).iterdir() ) ) >= num_class_images:
return
while True:
SCREAMING_SNAKE_CASE_ : Optional[Any] = client.query(text=lowerCAmelCase )
if len(lowerCAmelCase ) >= factor * num_class_images or num_images > 1E4:
break
else:
SCREAMING_SNAKE_CASE_ : Tuple = int(factor * num_images )
SCREAMING_SNAKE_CASE_ : Dict = ClipClient(
url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=lowerCAmelCase , aesthetic_weight=0.1 , )
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : Tuple = tqdm(desc="downloading real regularization images" , total=lowerCAmelCase )
with open(f'{class_data_dir}/caption.txt' , "w" ) as fa, open(f'{class_data_dir}/urls.txt' , "w" ) as fa, open(
f'{class_data_dir}/images.txt' , "w" ) as fa:
while total < num_class_images:
SCREAMING_SNAKE_CASE_ : List[str] = class_images[count]
count += 1
try:
SCREAMING_SNAKE_CASE_ : Any = requests.get(images["url"] )
if img.status_code == 2_0_0:
SCREAMING_SNAKE_CASE_ : str = Image.open(BytesIO(img.content ) )
with open(f'{class_data_dir}/images/{total}.jpg' , "wb" ) as f:
f.write(img.content )
fa.write(images["caption"] + "\n" )
fa.write(images["url"] + "\n" )
fa.write(f'{class_data_dir}/images/{total}.jpg' + "\n" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = argparse.ArgumentParser("" , add_help=lowerCAmelCase )
parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=lowerCAmelCase , type=lowerCAmelCase )
parser.add_argument("--class_data_dir" , help="path to save images" , required=lowerCAmelCase , type=lowerCAmelCase )
parser.add_argument("--num_class_images" , help="number of images to download" , default=2_0_0 , type=lowerCAmelCase )
return parser.parse_args()
if __name__ == "__main__":
__lowerCamelCase : List[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
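    # Example invocation, using only the flags declared above (the prompt and
    # paths are placeholders; the script filename is an assumption):
    #
    #   python retrieve.py \
    #       --class_prompt "a photo of a dog" \
    #       --class_data_dir ./class_images \
    #       --num_class_images 200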
from collections import defaultdict
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Tuple = True
for v in tree[start]:
if v not in visited:
ret += dfs(lowerCAmelCase )
if ret % 2 == 0:
cuts.append(lowerCAmelCase )
return ret
def _snake_case ( ):
"""simple docstring"""
dfs(1 )
if __name__ == "__main__":
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = 10, 9
__lowerCamelCase : Optional[int] = defaultdict(list)
__lowerCamelCase : dict[int, bool] = {}
__lowerCamelCase : list[int] = []
__lowerCamelCase : Optional[Any] = 0
__lowerCamelCase : Any = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
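# Why `len(cuts) - 1`: every even-sized subtree can be detached by cutting the
# edge to its parent, but dfs(1) also records the whole (even-sized) tree as a
# "cut" even though no edge is removed for it, hence the subtraction. For the
# 10-vertex sample above the expected output is 2.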
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
__lowerCamelCase : int = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def _snake_case ( lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase , lowerCAmelCase )
__lowerCamelCase : List[Any] = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def _snake_case ( lowerCAmelCase : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = list(s_dict.keys() )
for key in keys:
SCREAMING_SNAKE_CASE_ : str = key
for k, v in WHISPER_MAPPING.items():
if k in key:
SCREAMING_SNAKE_CASE_ : List[Any] = new_key.replace(lowerCAmelCase , lowerCAmelCase )
print(f'{key} -> {new_key}' )
SCREAMING_SNAKE_CASE_ : List[str] = s_dict.pop(lowerCAmelCase )
return s_dict
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = emb.weight.shape
SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.Linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = emb.weight.data
return lin_layer
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : str ):
"""simple docstring"""
os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.basename(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = url.split("/" )[-2]
SCREAMING_SNAKE_CASE_ : str = os.path.join(lowerCAmelCase , lowerCAmelCase )
if os.path.exists(lowerCAmelCase ) and not os.path.isfile(lowerCAmelCase ):
raise RuntimeError(f'{download_target} exists and is not a regular file' )
if os.path.isfile(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Dict = open(lowerCAmelCase , "rb" ).read()
        if hashlib.sha256(lowerCAmelCase ).hexdigest() == expected_sha256:
return model_bytes
else:
warnings.warn(f'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )
with urllib.request.urlopen(lowerCAmelCase ) as source, open(lowerCAmelCase , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=8_0 , unit="iB" , unit_scale=lowerCAmelCase , unit_divisor=1_0_2_4 ) as loop:
while True:
SCREAMING_SNAKE_CASE_ : int = source.read(8_1_9_2 )
if not buffer:
break
output.write(lowerCAmelCase )
loop.update(len(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ : Any = open(lowerCAmelCase , "rb" ).read()
    if hashlib.sha256(lowerCAmelCase ).hexdigest() != expected_sha256:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
return model_bytes
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if ".pt" not in checkpoint_path:
SCREAMING_SNAKE_CASE_ : Dict = _download(_MODELS[checkpoint_path] )
else:
SCREAMING_SNAKE_CASE_ : Any = torch.load(lowerCAmelCase , map_location="cpu" )
SCREAMING_SNAKE_CASE_ : List[str] = original_checkpoint["dims"]
SCREAMING_SNAKE_CASE_ : Any = original_checkpoint["model_state_dict"]
SCREAMING_SNAKE_CASE_ : Optional[Any] = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(lowerCAmelCase )
rename_keys(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Union[str, Any] = state_dict["decoder.layers.0.fc1.weight"].shape[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=lowerCAmelCase , decoder_ffn_dim=lowerCAmelCase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_head"] , max_source_positions=dimensions["n_audio_ctx"] , )
SCREAMING_SNAKE_CASE_ : Tuple = WhisperForConditionalGeneration(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = model.model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
if len(lowerCAmelCase ) > 0 and not set(lowerCAmelCase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
f' but all the following weights are missing {missing}' )
if tie_embeds:
SCREAMING_SNAKE_CASE_ : List[Any] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = proj_out_weights
model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowerCamelCase : str = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
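    # Example invocation; "tiny.en" is an alias resolved via the _MODELS map
    # above, the output path is a placeholder, and the script filename is an
    # assumption:
    #
    #   python convert_openai_to_hf.py --checkpoint_path tiny.en \
    #       --pytorch_dump_folder_path ./whisper-tiny.en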
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCAmelCase )
env_command_parser(subparsers=lowerCAmelCase )
launch_command_parser(subparsers=lowerCAmelCase )
tpu_command_parser(subparsers=lowerCAmelCase )
test_command_parser(subparsers=lowerCAmelCase )
# Let's go
SCREAMING_SNAKE_CASE_ : Dict = parser.parse_args()
if not hasattr(lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase )
if __name__ == "__main__":
main()
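    # Typical invocations of the resulting CLI, one per subcommand registered
    # above (the script name in the last line is a placeholder):
    #
    #   accelerate config            # interactive setup
    #   accelerate env               # report the current environment
    #   accelerate test              # sanity-check the saved config
    #   accelerate launch train.py   # run a script with the configured setup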
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def _snake_case ( lowerCAmelCase : List[str] ):
"""simple docstring"""
return 1 / (1 + np.exp(-z ))
def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : Dict ):
"""simple docstring"""
return (-y * np.log(lowerCAmelCase ) - (1 - y) * np.log(1 - h )).mean()
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.dot(lowerCAmelCase , lowerCAmelCase )
return np.sum(y * scores - np.log(1 + np.exp(lowerCAmelCase ) ) )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Dict=7_0_0_0_0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.zeros(x.shape[1] )
for iterations in range(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any = np.dot(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = sigmoid_function(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : Any = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE_ : Optional[Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE_ : List[str] = np.dot(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] = sigmoid_function(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ : int = cost_function(lowerCAmelCase , lowerCAmelCase )
if iterations % 1_0_0 == 0:
print(f'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = datasets.load_iris()
__lowerCamelCase : List[Any] = iris.data[:, :2]
__lowerCamelCase : List[str] = (iris.target != 0) * 1
__lowerCamelCase : Tuple = 0.1
__lowerCamelCase : str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def _snake_case ( lowerCAmelCase : Any ):
"""simple docstring"""
return sigmoid_function(
np.dot(lowerCAmelCase , lowerCAmelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = (x[:, 0].min(), x[:, 0].max())
((__lowerCamelCase) , (__lowerCamelCase)) : Any = (x[:, 1].min(), x[:, 1].max())
((__lowerCamelCase) , (__lowerCamelCase)) : Optional[Any] = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__lowerCamelCase : Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
__lowerCamelCase : Union[str, Any] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
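# A small follow-up check, not in the original script: threshold the fitted
# probabilities at 0.5 and report training accuracy (`predict_prob` is the
# helper defined above, shown here under its original name):
#
#   preds = (predict_prob(x) >= 0.5).astype(int)
#   print("train accuracy:", (preds == y).mean())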
| 18 | import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCamelCase : int = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 15_36,
'''junnyu/roformer_chinese_base''': 15_36,
'''junnyu/roformer_chinese_char_small''': 5_12,
'''junnyu/roformer_chinese_char_base''': 5_12,
'''junnyu/roformer_small_discriminator''': 1_28,
'''junnyu/roformer_small_generator''': 1_28,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case
    def __getstate__(self):
        # the custom Jieba pre-tokenizer cannot be pickled, so swap in a plain one
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
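# --- Editor's note (hedged sketch): the __getstate__/__setstate__ pair above exists
# because `PreTokenizer.custom(...)` wraps an arbitrary Python object that the Rust
# backend cannot serialize, so the tokenizer swaps in a plain BertPreTokenizer before
# pickling and reinstalls the Jieba pre-tokenizer on unpickling. The same pattern in
# miniature, with a file handle standing in for the non-picklable component:
import pickle


class _WithUnpicklableField:
    def __init__(self):
        self.handle = open(__file__)  # stand-in for a non-picklable object

    def __getstate__(self):
        state = self.__dict__.copy()
        state["handle"] = None  # drop the non-picklable piece before pickling
        return state

    def __setstate__(self, state):
        self.__dict__ = state
        self.handle = open(__file__)  # rebuild it after unpickling


_restored = pickle.loads(pickle.dumps(_WithUnpicklableField()))
assert _restored.handle is not None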
| 18 | 1 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def rename_key(key):
    """Rewrite indexed PyTorch module names ("blocks.0") into Flax form ("blocks_0")."""
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename a flattened PyTorch key and reshape its tensor to the Flax layout."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    """Convert a PyTorch state dict into the parameter tree a Flax model expects."""
    # Step 1: Convert the PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))
    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)
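# --- Editor's note (hedged sketch): a quick check of what `rename_key` above does.
# It rewrites PyTorch-style indexed module names into the underscore form the Flax
# parameter tree uses, and leaves un-indexed names alone:
assert rename_key("down_blocks.0.attentions.1.proj") == "down_blocks_0.attentions_1.proj"
assert rename_key("conv_in.weight") == "conv_in.weight"  # untouched: no ".<digit>" segment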
| 18 | import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class a__ ( A__ ):
def __init__( self : Tuple,_A : Optional[int],_A : Any=13,_A : List[str]=7,_A : int=True,_A : Dict=True,_A : Dict=False,_A : List[Any]=True,_A : Any=99,_A : Optional[int]=32,_A : Any=5,_A : List[Any]=4,_A : Dict=64,_A : Optional[Any]="gelu",_A : Tuple=0.1,_A : Any=0.1,_A : List[Any]=512,_A : Dict=16,_A : Optional[Any]=2,_A : Union[str, Any]=0.02,_A : List[str]=3,_A : Optional[Any]=4,_A : Union[str, Any]=None,_A : Tuple=2,_A : List[str]=2,_A : str=2,_A : Dict=2,_A : Optional[Any]=4,_A : Union[str, Any]=1,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = parent
SCREAMING_SNAKE_CASE_ : Optional[int] = batch_size
SCREAMING_SNAKE_CASE_ : Dict = seq_length
SCREAMING_SNAKE_CASE_ : Dict = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE_ : Tuple = vocab_size
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : str = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = type_vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Tuple = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Dict = scope
SCREAMING_SNAKE_CASE_ : int = q_groups
SCREAMING_SNAKE_CASE_ : Tuple = k_groups
SCREAMING_SNAKE_CASE_ : List[Any] = v_groups
SCREAMING_SNAKE_CASE_ : Tuple = post_attention_groups
SCREAMING_SNAKE_CASE_ : int = intermediate_groups
SCREAMING_SNAKE_CASE_ : List[Any] = output_groups
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Any = None
SCREAMING_SNAKE_CASE_ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size],self.num_choices )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : str ):
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size,vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,attention_probs_dropout_prob=self.hidden_dropout_prob,attention_dropout=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,initializer_range=self.initializer_range,q_groups=self.q_groups,k_groups=self.k_groups,v_groups=self.v_groups,post_attention_groups=self.post_attention_groups,intermediate_groups=self.intermediate_groups,output_groups=self.output_groups,)
def __UpperCamelCase ( self : Tuple,_A : Union[str, Any],_A : Union[str, Any],_A : int,_A : Optional[int],_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertModel(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(_A,_A )
SCREAMING_SNAKE_CASE_ : List[str] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Dict,_A : Any,_A : Tuple,_A : str,_A : Any,_A : Union[str, Any],_A : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = SqueezeBertForMaskedLM(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Optional[int],_A : Union[str, Any],_A : Union[str, Any],_A : Any,_A : Tuple,_A : int,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_A,attention_mask=_A,start_positions=_A,end_positions=_A )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : List[Any],_A : List[str],_A : Tuple,_A : List[Any],_A : List[str],_A : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = SqueezeBertForSequenceClassification(_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : str,_A : Optional[int],_A : str,_A : List[Any],_A : List[str],_A : str,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = SqueezeBertForTokenClassification(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_A,attention_mask=_A,labels=_A )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any],_A : Tuple,_A : str,_A : Optional[Any],_A : int,_A : str,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_choices
SCREAMING_SNAKE_CASE_ : Union[str, Any] = SqueezeBertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = input_ids.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : str = input_mask.unsqueeze(1 ).expand(-1,self.num_choices,-1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Optional[int] = model(
_A,attention_mask=_A,labels=_A,)
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) : Dict = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
A = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
A = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A = False
A = True
A = False
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = SqueezeBertModelTester(self )
SCREAMING_SNAKE_CASE_ : List[str] = ConfigTester(self,config_class=_A,dim=37 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_A )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_A )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_A )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_A )
@slow
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Tuple = SqueezeBertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_sentencepiece
@require_tokenizers
@require_torch
class a__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_A )[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((1, 3) )
self.assertEqual(output.shape,_A )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(_A,_A,atol=1E-4 ) )
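# --- Editor's note (hedged sketch): the test file above in miniature. A tiny config
# is built, random ids are run through one model, and the output shape is asserted.
# The config values below are illustrative, chosen only so the model stays small and
# the head count divides the hidden size; they are not SqueezeBert's real defaults.
def _squeezebert_shape_smoke_test():
    config = SqueezeBertConfig(
        vocab_size=99, embedding_size=32, hidden_size=32,
        num_hidden_layers=2, num_attention_heads=4, intermediate_size=64,
    )
    model = SqueezeBertModel(config).eval()
    input_ids = torch.randint(0, config.vocab_size, (2, 7))  # (batch, seq_len)
    with torch.no_grad():
        out = model(input_ids)
    assert out.last_hidden_state.shape == (2, 7, config.hidden_size)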
| 18 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableUnCLIPPipeline
A = TEXT_TO_IMAGE_PARAMS
A = TEXT_TO_IMAGE_BATCH_PARAMS
A = TEXT_TO_IMAGE_IMAGE_PARAMS
A = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
A = False
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=_A,projection_dim=_A,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = PriorTransformer(
num_attention_heads=2,attention_head_dim=12,embedding_dim=_A,num_layers=1,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = DDPMScheduler(
variance_type="fixed_small_log",prediction_type="sample",num_train_timesteps=1000,clip_sample=_A,clip_sample_range=5.0,beta_schedule="squaredcos_cap_v2",)
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = StableUnCLIPImageNormalizer(embedding_dim=_A )
SCREAMING_SNAKE_CASE_ : str = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=_A,projection_dim=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = UNetaDConditionModel(
sample_size=32,in_channels=4,out_channels=4,down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),block_out_channels=(32, 64),attention_head_dim=(2, 4),class_embed_type="projection",projection_class_embeddings_input_dim=embedder_projection_dim * 2,cross_attention_dim=_A,layers_per_block=1,upcast_attention=_A,use_linear_projection=_A,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Any = DDIMScheduler(
beta_schedule="scaled_linear",beta_start=0.00085,beta_end=0.012,prediction_type="v_prediction",set_alpha_to_one=_A,steps_offset=1,)
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoencoderKL()
SCREAMING_SNAKE_CASE_ : List[str] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __UpperCamelCase ( self : Optional[int],_A : int,_A : Optional[Any]=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Any = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Any = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=_A )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
SCREAMING_SNAKE_CASE_ : Any = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l",torch_dtype=torch.floataa )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : int = torch.Generator(device="cpu" ).manual_seed(0 )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = pipe("anime turtle",generator=_A,output_type="np" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_A,_A )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l",torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ : int = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_ : List[Any] = pipe(
"anime turtle",prior_num_inference_steps=2,num_inference_steps=2,output_type="np",)
SCREAMING_SNAKE_CASE_ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
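# --- Editor's note (hedged sketch): the two memory-saving switches the slow tests
# above rely on, in isolation. `enable_attention_slicing()` computes attention in
# smaller chunks, and `enable_sequential_cpu_offload()` keeps each submodule on CPU
# until it is needed. The model id matches the tests; the prompt and step count are
# illustrative. This needs a GPU and a checkpoint download, so it is not run here.
def _stable_unclip_low_memory_demo():
    import torch
    from diffusers import StableUnCLIPPipeline

    pipe = StableUnCLIPPipeline.from_pretrained(
        "fusing/stable-unclip-2-1-l", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing()       # slice attention into smaller chunks
    pipe.enable_sequential_cpu_offload()  # move modules to the GPU one at a time
    return pipe("a photo of a turtle", num_inference_steps=20).images[0]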
| 18 | import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
    # the text reader always exposes a single "text" column, whose dtype defaults to string
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
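# --- Editor's note (hedged sketch): what the reader under test does from the user
# side. Every line of a plain-text file becomes one row of a single string column
# named "text". The file path below is illustrative.
def _text_loading_demo(path="my_corpus.txt"):
    from datasets import load_dataset

    ds = load_dataset("text", data_files={"train": path})["train"]
    assert ds.column_names == ["text"]  # one column, dtype "string" by default
    return ds.num_rows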
| 18 | 1 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
random.seed(lowerCAmelCase )
np.random.seed(lowerCAmelCase )
torch.manual_seed(lowerCAmelCase )
torch.cuda.manual_seed_all(lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class a__ :
def __init__( self : Tuple,_A : Iterable[torch.nn.Parameter],_A : float = 0.9999,_A : float = 0.0,_A : int = 0,_A : bool = False,_A : Union[float, int] = 1.0,_A : Union[float, int] = 2 / 3,_A : Optional[Any] = None,_A : Dict[str, Any] = None,**_A : str,):
"""simple docstring"""
if isinstance(_A,torch.nn.Module ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`","1.0.0",_A,standard_warn=_A,)
SCREAMING_SNAKE_CASE_ : str = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
SCREAMING_SNAKE_CASE_ : str = True
if kwargs.get("max_value",_A ) is not None:
SCREAMING_SNAKE_CASE_ : str = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value","1.0.0",_A,standard_warn=_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs["max_value"]
if kwargs.get("min_value",_A ) is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value","1.0.0",_A,standard_warn=_A )
SCREAMING_SNAKE_CASE_ : int = kwargs["min_value"]
SCREAMING_SNAKE_CASE_ : Dict = list(_A )
SCREAMING_SNAKE_CASE_ : List[str] = [p.clone().detach() for p in parameters]
if kwargs.get("device",_A ) is not None:
SCREAMING_SNAKE_CASE_ : str = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device","1.0.0",_A,standard_warn=_A )
self.to(device=kwargs["device"] )
SCREAMING_SNAKE_CASE_ : Dict = None
SCREAMING_SNAKE_CASE_ : Any = decay
SCREAMING_SNAKE_CASE_ : List[str] = min_decay
SCREAMING_SNAKE_CASE_ : Tuple = update_after_step
SCREAMING_SNAKE_CASE_ : List[str] = use_ema_warmup
SCREAMING_SNAKE_CASE_ : List[Any] = inv_gamma
SCREAMING_SNAKE_CASE_ : List[Any] = power
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None # set in `step()`
SCREAMING_SNAKE_CASE_ : Dict = model_cls
SCREAMING_SNAKE_CASE_ : Any = model_config
@classmethod
def __UpperCamelCase ( cls : Dict,_A : Tuple,_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_cls.load_config(_A,return_unused_kwargs=_A )
SCREAMING_SNAKE_CASE_ : Tuple = model_cls.from_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = cls(model.parameters(),model_cls=_A,model_config=model.config )
ema_model.load_state_dict(_A )
return ema_model
def __UpperCamelCase ( self : Optional[Any],_A : int ):
"""simple docstring"""
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
SCREAMING_SNAKE_CASE_ : str = self.model_cls.from_config(self.model_config )
SCREAMING_SNAKE_CASE_ : Dict = self.state_dict()
state_dict.pop("shadow_params",_A )
model.register_to_config(**_A )
self.copy_to(model.parameters() )
model.save_pretrained(_A )
def __UpperCamelCase ( self : str,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = max(0,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
SCREAMING_SNAKE_CASE_ : Optional[int] = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
SCREAMING_SNAKE_CASE_ : List[str] = (1 + step) / (10 + step)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = min(_A,self.decay )
# make sure decay is not smaller than min_decay
SCREAMING_SNAKE_CASE_ : Any = max(_A,self.min_decay )
return cur_decay_value
@torch.no_grad()
def __UpperCamelCase ( self : str,_A : Iterable[torch.nn.Parameter] ):
"""simple docstring"""
if isinstance(_A,torch.nn.Module ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`","1.0.0",_A,standard_warn=_A,)
SCREAMING_SNAKE_CASE_ : Tuple = parameters.parameters()
SCREAMING_SNAKE_CASE_ : int = list(_A )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
SCREAMING_SNAKE_CASE_ : Any = self.get_decay(self.optimization_step )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = decay
SCREAMING_SNAKE_CASE_ : Tuple = 1 - decay
SCREAMING_SNAKE_CASE_ : str = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params,_A ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = deepspeed.zero.GatheredParameters(_A,modifier_rank=_A )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_A )
def __UpperCamelCase ( self : int,_A : Iterable[torch.nn.Parameter] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = list(_A )
for s_param, param in zip(self.shadow_params,_A ):
param.data.copy_(s_param.to(param.device ).data )
def __UpperCamelCase ( self : Dict,_A : Any=None,_A : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
p.to(device=_A,dtype=_A ) if p.is_floating_point() else p.to(device=_A )
for p in self.shadow_params
]
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def __UpperCamelCase ( self : Optional[Any],_A : Iterable[torch.nn.Parameter] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [param.detach().cpu().clone() for param in parameters]
def __UpperCamelCase ( self : int,_A : Iterable[torch.nn.Parameter] ):
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params,_A ):
param.data.copy_(c_param.data )
# Better memory-wise.
SCREAMING_SNAKE_CASE_ : Dict = None
def __UpperCamelCase ( self : Union[str, Any],_A : dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = copy.deepcopy(_A )
SCREAMING_SNAKE_CASE_ : str = state_dict.get("decay",self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
SCREAMING_SNAKE_CASE_ : Dict = state_dict.get("min_decay",self.min_decay )
if not isinstance(self.min_decay,_A ):
raise ValueError("Invalid min_decay" )
SCREAMING_SNAKE_CASE_ : Any = state_dict.get("optimization_step",self.optimization_step )
if not isinstance(self.optimization_step,_A ):
raise ValueError("Invalid optimization_step" )
SCREAMING_SNAKE_CASE_ : Any = state_dict.get("update_after_step",self.update_after_step )
if not isinstance(self.update_after_step,_A ):
raise ValueError("Invalid update_after_step" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = state_dict.get("use_ema_warmup",self.use_ema_warmup )
if not isinstance(self.use_ema_warmup,_A ):
raise ValueError("Invalid use_ema_warmup" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = state_dict.get("inv_gamma",self.inv_gamma )
if not isinstance(self.inv_gamma,(float, int) ):
raise ValueError("Invalid inv_gamma" )
SCREAMING_SNAKE_CASE_ : int = state_dict.get("power",self.power )
if not isinstance(self.power,(float, int) ):
raise ValueError("Invalid power" )
SCREAMING_SNAKE_CASE_ : str = state_dict.get("shadow_params",_A )
if shadow_params is not None:
SCREAMING_SNAKE_CASE_ : int = shadow_params
if not isinstance(self.shadow_params,_A ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(_A,torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
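# --- Editor's note (hedged sketch): the training loop the EMA class above is built
# for: `step()` after every optimizer update, `store()`/`copy_to()` to evaluate with
# the averaged weights, `restore()` to resume training. The class is EMAModel in the
# diffusers source (its name is mangled in this file); the linear model, optimizer
# and loss here are illustrative stand-ins.
def _ema_usage_demo():
    import torch
    from diffusers.training_utils import EMAModel  # the class defined above

    model = torch.nn.Linear(4, 2)
    opt = torch.optim.SGD(model.parameters(), lr=1e-2)
    ema = EMAModel(model.parameters(), decay=0.999)
    for _ in range(10):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
        ema.step(model.parameters())  # update the shadow weights after each step
    ema.store(model.parameters())     # stash the raw weights
    ema.copy_to(model.parameters())   # evaluate with the averaged weights here
    ema.restore(model.parameters())   # put the raw weights back to keep training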
| 18 | import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
A = StableDiffusionLatentUpscalePipeline
A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'height',
'width',
'cross_attention_kwargs',
'negative_prompt_embeds',
'prompt_embeds',
}
A = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A = frozenset([] )
A = True
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 1
SCREAMING_SNAKE_CASE_ : Optional[int] = 4
SCREAMING_SNAKE_CASE_ : Optional[int] = (16, 16)
SCREAMING_SNAKE_CASE_ : Dict = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(_A )
return image
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ : str = UNetaDConditionModel(
act_fn="gelu",attention_head_dim=8,norm_num_groups=_A,block_out_channels=[32, 32, 64, 64],time_cond_proj_dim=160,conv_in_kernel=1,conv_out_kernel=1,cross_attention_dim=32,down_block_types=(
"KDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
"KCrossAttnDownBlock2D",
),in_channels=8,mid_block_type=_A,only_cross_attention=_A,out_channels=5,resnet_time_scale_shift="scale_shift",time_embedding_type="fourier",timestep_post_act="gelu",up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64],in_channels=3,out_channels=3,down_block_types=[
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
],up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],latent_channels=4,)
SCREAMING_SNAKE_CASE_ : int = EulerDiscreteScheduler(prediction_type="sample" )
SCREAMING_SNAKE_CASE_ : List[Any] = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act="quick_gelu",projection_dim=512,)
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
"unet": model.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def __UpperCamelCase ( self : List[Any],_A : int,_A : Tuple=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_ : Dict = torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_ : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"image": self.dummy_image.cpu(),
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : List[str] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Tuple = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Dict = pipe(**_A ).images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape,(1, 256, 256, 3) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(
[0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] )
SCREAMING_SNAKE_CASE_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A,1E-3 )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"KDPM2AncestralDiscreteScheduler",
"DPMSolverSDEScheduler",
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ : Any = self.pipeline_class(**_A )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_ : Any = self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
                # skip schedulers that this pipeline does not support
continue
SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,scheduler_enum.name )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Optional[int] = pipe(**_A )[0]
outputs.append(_A )
assert check_same_shape(_A )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[int] = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",torch_dtype=torch.floataa )
pipe.to("cuda" )
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Tuple = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
SCREAMING_SNAKE_CASE_ : str = pipe(_A,generator=_A,output_type="latent" ).images
SCREAMING_SNAKE_CASE_ : Optional[Any] = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
"stabilityai/sd-x2-latent-upscaler",torch_dtype=torch.floataa )
upscaler.to("cuda" )
SCREAMING_SNAKE_CASE_ : Any = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" )
SCREAMING_SNAKE_CASE_ : str = upscaler(
prompt=_A,image=_A,num_inference_steps=20,guidance_scale=0,generator=_A,output_type="np",).images[0]
SCREAMING_SNAKE_CASE_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" )
assert np.abs((expected_image - image).max() ) < 5E-2
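# --- Editor's note (hedged sketch): the two-stage handoff the slow test above
# exercises. The base pipeline produces latents (output_type="latent") which are
# passed, still in latent space, to the x2 latent upscaler. Model ids match the
# tests; the prompt and step count are illustrative. Requires a GPU and downloads.
def _two_stage_upscale_demo(prompt="a photo of an astronaut"):
    import torch
    from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline

    base = StableDiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
    ).to("cuda")
    upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
        "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
    ).to("cuda")
    latents = base(prompt, output_type="latent").images  # latents, not decoded pixels
    return upscaler(
        prompt=prompt, image=latents, num_inference_steps=20, guidance_scale=0
    ).images[0]  # decoded at 2x the base resolution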
| 18 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class a__ ( A__ ):
A = 'audio-spectrogram-transformer'
def __init__( self : Tuple,_A : Tuple=768,_A : str=12,_A : List[Any]=12,_A : Any=3072,_A : Optional[int]="gelu",_A : Optional[int]=0.0,_A : Optional[int]=0.0,_A : int=0.02,_A : Optional[int]=1E-12,_A : Tuple=16,_A : List[str]=True,_A : Union[str, Any]=10,_A : Tuple=10,_A : Optional[Any]=1024,_A : Tuple=128,**_A : str,):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Any = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : str = num_attention_heads
SCREAMING_SNAKE_CASE_ : Dict = intermediate_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Any = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE_ : Tuple = qkv_bias
SCREAMING_SNAKE_CASE_ : str = frequency_stride
SCREAMING_SNAKE_CASE_ : int = time_stride
SCREAMING_SNAKE_CASE_ : Any = max_length
SCREAMING_SNAKE_CASE_ : Optional[int] = num_mel_bins
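# --- Editor's note (hedged sketch): how the spectrogram-specific fields above turn
# into a patch grid. To the best of my reading of the AST embedding code, 16x16
# patches are slid with `time_stride` and `frequency_stride` over a
# (max_length x num_mel_bins) spectrogram, giving the standard strided-window count:
def ast_patch_grid(max_length=1024, num_mel_bins=128, patch_size=16,
                   time_stride=10, frequency_stride=10):
    time_out = (max_length - patch_size) // time_stride + 1         # windows along time
    freq_out = (num_mel_bins - patch_size) // frequency_stride + 1  # windows along frequency
    return time_out, freq_out, time_out * freq_out

print(ast_patch_grid())  # (101, 12, 1212) with the defaults used above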
| 18 | from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class a__ ( A__ ):
A = 'perceiver'
def __init__( self : List[Any],_A : Tuple=256,_A : str=1280,_A : List[Any]=768,_A : Union[str, Any]=1,_A : Union[str, Any]=26,_A : List[str]=8,_A : List[Any]=8,_A : List[Any]=None,_A : List[Any]=None,_A : Union[str, Any]="kv",_A : Any=1,_A : int=1,_A : Dict="gelu",_A : Any=0.1,_A : int=0.02,_A : int=1E-12,_A : Any=True,_A : Optional[Any]=262,_A : List[Any]=2048,_A : str=56,_A : Optional[int]=[368, 496],_A : Dict=16,_A : Tuple=1920,_A : List[Any]=16,_A : str=[1, 16, 224, 224],**_A : Optional[Any],):
"""simple docstring"""
super().__init__(**_A )
SCREAMING_SNAKE_CASE_ : Dict = num_latents
SCREAMING_SNAKE_CASE_ : List[Any] = d_latents
SCREAMING_SNAKE_CASE_ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE_ : Optional[int] = num_blocks
SCREAMING_SNAKE_CASE_ : List[Any] = num_self_attends_per_block
SCREAMING_SNAKE_CASE_ : Tuple = num_self_attention_heads
SCREAMING_SNAKE_CASE_ : List[str] = num_cross_attention_heads
SCREAMING_SNAKE_CASE_ : List[Any] = qk_channels
SCREAMING_SNAKE_CASE_ : Any = v_channels
SCREAMING_SNAKE_CASE_ : Any = cross_attention_shape_for_attention
SCREAMING_SNAKE_CASE_ : List[str] = self_attention_widening_factor
SCREAMING_SNAKE_CASE_ : Any = cross_attention_widening_factor
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : Tuple = use_query_residual
# masked language modeling attributes
SCREAMING_SNAKE_CASE_ : List[str] = vocab_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max_position_embeddings
# image classification attributes
SCREAMING_SNAKE_CASE_ : Dict = image_size
# flow attributes
SCREAMING_SNAKE_CASE_ : List[Any] = train_size
# multimodal autoencoding attributes
SCREAMING_SNAKE_CASE_ : str = num_frames
SCREAMING_SNAKE_CASE_ : Any = audio_samples_per_frame
SCREAMING_SNAKE_CASE_ : Tuple = samples_per_patch
SCREAMING_SNAKE_CASE_ : Optional[Any] = output_shape
class a__ ( A__ ):
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE_ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 1E-4
def __UpperCamelCase ( self : List[str],_A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],_A : int = -1,_A : int = -1,_A : int = -1,_A : bool = False,_A : Optional[TensorType] = None,_A : int = 3,_A : int = 40,_A : int = 40,):
"""simple docstring"""
if isinstance(_A,_A ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_batch,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Tuple = preprocessor.num_special_tokens_to_add(_A )
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(
_A,fixed_dimension=OnnxConfig.default_fixed_sequence,num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE_ : Optional[Any] = [" ".join(["a"] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE_ : str = dict(preprocessor(_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : List[str] = inputs.pop("input_ids" )
return inputs
elif isinstance(_A,_A ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE_ : Any = compute_effective_axis_dimension(_A,fixed_dimension=OnnxConfig.default_fixed_batch )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._generate_dummy_images(_A,_A,_A,_A )
SCREAMING_SNAKE_CASE_ : Any = dict(preprocessor(images=_A,return_tensors=_A ) )
SCREAMING_SNAKE_CASE_ : Any = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
| 18 | 1 |
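# --- Editor's note (hedged sketch): the core idea of generate_dummy_inputs in the
# Perceiver OnnxConfig above. When an exported axis is dynamic (-1), a small fixed
# size is substituted so ONNX tracing does not specialize on it, minus room for any
# special tokens. A restatement of that helper's contract as it is used there
# (assumed behavior, not copied from the library source):
def _effective_axis_dimension(dimension, fixed_dimension, num_token_to_add=0):
    if dimension <= 0:               # -1 marks a dynamic axis
        dimension = fixed_dimension  # fall back to the fixed default
    return dimension - num_token_to_add

assert _effective_axis_dimension(-1, fixed_dimension=2) == 2
assert _effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2) == 6
assert _effective_axis_dimension(16, fixed_dimension=8) == 16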
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : int=() , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]="no" , lowerCAmelCase : Optional[int]="29500" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : Tuple = False
if any(key.startswith("KAGGLE" ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : int = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Any = "google.colab" in str(sys.modules["IPython"].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Tuple = PrecisionType(mixed_precision.lower() )
except ValueError:
        raise ValueError(
            f'Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME" , lowerCAmelCase ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
"your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : str = 8
SCREAMING_SNAKE_CASE_ : Optional[int] = PrepareForLaunch(lowerCAmelCase , distributed_type="TPU" )
print(f'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowerCAmelCase , args=lowerCAmelCase , nprocs=lowerCAmelCase , start_method="fork" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on one CPU." )
function(*lowerCAmelCase )
else:
if num_processes is None:
raise ValueError(
"You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call." )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
"inside your training function. Restart your notebook and make sure no cells initializes an "
"`Accelerator`." )
if torch.cuda.is_initialized():
raise ValueError(
"To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
"using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
"function." )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=lowerCAmelCase , master_addr="127.0.01" , master_port=lowerCAmelCase , mixed_precision=lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : str = PrepareForLaunch(lowerCAmelCase , distributed_type="MULTI_GPU" )
print(f'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowerCAmelCase , args=lowerCAmelCase , nprocs=lowerCAmelCase , start_method="fork" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
"This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
"Please review your imports and test them when running the `notebook_launcher()` to identify "
"which one is problematic." ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : List[Any] = "1"
print("Launching training on MPS." )
elif torch.cuda.is_available():
print("Launching training on one GPU." )
else:
print("Launching training on CPU." )
function(*lowerCAmelCase )
def debug_launcher(function, args=(), num_processes=2):
    """Launch `function` on `num_processes` CPU processes for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
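# A minimal usage sketch (hedged: `training_loop` and its hyperparameters are
# placeholders, not part of this module). In a notebook you would define the
# training function in a cell and hand it to the launcher:
#
#     def training_loop(mixed_precision="fp16", seed=42):
#         accelerator = Accelerator(mixed_precision=mixed_precision)
#         ...  # build model and dataloaders, then accelerator.prepare(...)
#
#     notebook_launcher(training_loop, args=("fp16", 42), num_processes=2)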
| 18 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        """Raise if the mapping node contains the same key more than once."""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        """Construct a mapping while rejecting duplicate keys."""
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
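# A quick sketch of the loader's behavior (hedged: illustrative snippet, not part
# of the original module):
#
#     yaml.load("a: 1\nb: 2", Loader=_NoDuplicateSafeLoader)  # -> {"a": 1, "b": 2}
#     yaml.load("a: 1\na: 2", Loader=_NoDuplicateSafeLoader)  # -> TypeError: Got duplicate yaml keys: ['a']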
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its leading YAML block (between `---` markers) and the rest."""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load the metadata from the YAML block of a README file."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write (or update) the metadata YAML block of a README file."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Load the metadata from a YAML string, mapping dashed keys to field names."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
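# A small end-to-end sketch (hedged: the README content below is invented for
# illustration):
#
#     readme = "---\npretty_name: Demo\ntrain-eval-index:\n- config: default\n---\n# Demo\n"
#     meta = DatasetMetadata.from_yaml_string(_split_yaml_from_readme(readme)[0])
#     assert "train_eval_index" in meta  # dashed YAML key mapped to a field name
#     print(meta.to_yaml_string())       # dumps it back with the dash restored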
| 18 | 1 |
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed of a gas molecule, vrms = sqrt(3RT / M), in m/s."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28  # nitrogen; note that for SI-consistent m/s the molar mass must be in kg/mol (0.028)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
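# A quick worked check (hedged: values rounded by hand): with SI units, nitrogen
# at 300 K has M = 0.028 kg/mol, so
#
#     vrms = (3 * 8.3144598 * 300 / 0.028) ** 0.5  # ~516.9 m/s
#
# which matches the textbook value for N2 at room temperature.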
| 18 |
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Return the resonant frequency of an LC circuit, f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
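# A minimal usage sketch (hedged: the component values are illustrative):
#
#     label, freq = resonant_frequency(inductance=10e-3, capacitance=100e-9)  # 10 mH, 100 nF
#     print(label, f"{freq:.1f} Hz")  # ~5032.9 Hz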
| 18 | 1 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    """Load the weights of a TensorFlow 2.x BERT checkpoint into a PyTorch `BertModel`."""
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {full_name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )
    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm (unreachable duplicate of the branch above, kept from the original)
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
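# For illustration (hedged: the variable name below is hypothetical, but the
# mapping follows the branches above): a TF2 variable such as
#
#     model/layer_with_weights-4/_attention_layer/_query_dense/kernel/.ATTRIBUTES/VARIABLE_VALUE
#
# is traced to the PyTorch attribute path `encoder.layer.0.attention.self.query.weight`
# (layer_with_weights-4 -> encoder layer 0), with the dense kernel transposed on the way in.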
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Build a PyTorch BERT model from a config and a TF 2.x checkpoint, then save it."""
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
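# Example invocation (hedged: the script filename and all paths are placeholders):
#
#     python convert_tf2_bert_checkpoint.py \
#         --tf_checkpoint_path ./tf2_bert/bert_model.ckpt \
#         --bert_config_file ./tf2_bert/bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin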
| 18 |
def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place with insertion sort, using binary search to find each insert position."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
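# A short trace (hedged: worked by hand for illustration): sorting [5, 2, 4]
#
#     i=1: val=2, binary search over [5]    -> low=0, shift 5 right -> [2, 5, 4]
#     i=2: val=4, binary search over [2, 5] -> low=1, shift 5 right -> [2, 4, 5]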
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 18 | 1 |